text stringlengths 6 947k | repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1
value | license stringclasses 15
values | size int64 6 947k | score float64 0 0.34 |
|---|---|---|---|---|---|---|
from __future__ import unicode_literals
#
# Copyright 2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
class NotDAG(Exception):
    """Raised when a graph that must be a directed acyclic graph is not."""
class CantHappen(Exception):
    """Raised for conditions that should be impossible to reach."""
| jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gr/exceptions.py | Python | gpl-3.0 | 311 | 0.006431 |
# -*- coding: utf-8 -*-
from stash.tests.stashtest import StashTestCase
class CowsayTests(StashTestCase):
    """Tests for the cowsay command."""

    def test_help(self):
        """test help output"""
        output = self.run_command("cowsay --help", exitcode=0)
        self.assertIn("cowsay", output)
        self.assertIn("--help", output)
        self.assertIn("usage:", output)

    def test_singleline_1(self):
        """test for correct text in output"""
        output = self.run_command("cowsay test", exitcode=0)
        self.assertIn("test", output)
        self.assertNotIn("Hello, World!", output)
        self.assertEqual(output.count("<"), 1)
        self.assertEqual(output.count(">"), 1)

    # BUG FIX: this method was also named test_singleline_1, which silently
    # shadowed the definition above so only one of the two tests ever ran.
    def test_singleline_2(self):
        """test for correct text in output"""
        output = self.run_command("cowsay Hello, World!", exitcode=0)
        self.assertIn("Hello, World!", output)
        self.assertNotIn("test", output)
        self.assertEqual(output.count("<"), 1)
        self.assertEqual(output.count(">"), 1)

    def test_stdin_read(self):
        """test 'echo test | cowsay' printing 'test'"""
        output = self.run_command("echo test | cowsay", exitcode=0)
        self.assertIn("test", output)
        self.assertNotIn("Hello, World!", output)

    def test_stdin_ignore(self):
        """test 'echo test | cowsay Hello, World!' printing 'Hello World!'"""
        output = self.run_command("echo test | cowsay Hello, World!", exitcode=0)
        self.assertIn("Hello, World!", output)
        self.assertNotIn("test", output)

    def test_multiline_1(self):
        """test for correct multiline output"""
        output = self.run_command("cowsay Hello,\\nWorld!", exitcode=0)
        self.assertIn("Hello,", output)
        self.assertIn("World!", output)
        self.assertNotIn("Hello,\nWorld!", output)  # text should be split along the lines
        self.assertIn("/", output)
        self.assertIn("\\", output)
        self.assertNotIn("<", output)
        self.assertNotIn(">", output)

    def test_multiline_2(self):
        """test for correct multiline output"""
        output = self.run_command("cowsay Hello,\\nWorld!\\nPython4Ever", exitcode=0)
        self.assertIn("Hello,", output)
        self.assertIn("World!", output)
        self.assertIn("Python4Ever", output)
        self.assertNotIn("Hello,\nWorld!\nPython4Ever", output)  # text should be split along the lines
        self.assertIn("/", output)
        self.assertIn("\\", output)
        self.assertIn("|", output)
        self.assertNotIn("<", output)
        self.assertNotIn(">", output)
| ywangd/stash | tests/misc/test_cowsay.py | Python | mit | 2,613 | 0.001531 |
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
import bisect
import numpy as np
import scipy.stats as sps
import json
from dipde.interfaces.pandas import to_df
from dipde.internals import utilities as util
import logging
logger = logging.getLogger(__name__)
import scipy.sparse as sps
import scipy.sparse.linalg as spsla
class InternalPopulation(object):
    """Population density class

    This class encapsulates all the details necessary to propagate a population
    density equation driven by a combination of recurrent and background
    connections.  The voltage (spatial) domain discretization is defined by
    linear binning from v_min to v_max, in steps of dv (all in units of volts).
    The probability densities on this grid are recorded in pv, and must always
    sum to 1.

    Parameters
    ----------
    tau_m : float or distribution dict (default={'distribution':'delta', 'loc':0.02})
        Time constant (unit: 1/sec) of neuronal population.
    v_min : float (default=-.1)
        Minimum of voltage domain (unit: volt).
    v_max : float (default=.02)
        Maximum of voltage domain (absorbing boundary), i.e. spiking threshold (unit: volt).
    dv : float (default=.0001)
        Voltage domain discretization size (unit: volt).
    record : bool (default=True)
        If True, a history of the output firing rate is recorded (firing_rate_record attribute).
    initial_firing_rate : float (default=0.0)
        Initial/current firing rate of the population (unit: Hz).
    update_method : str 'approx', 'exact', or 'gmres' (default='approx')
        Method used to update pv (exact can be quite slow).
    approx_order : int or None (default=None)
        Maximum Taylor series expansion order to use when computing update to pv.
    tol : float or None (default=1e-14)
        Error tolerance used when computing update to pv; None selects a
        per-update-method default.
    norm : non-zero int, np.inf, -np.inf, or 'fro' (default=np.inf)
        Vector norm used in computation of tol.
    **kwargs
        Any additional keyword args are stored as metadata (metadata attribute).

    Attributes
    ----------
    self.edges : np.array
        Vector defining the boundaries of voltage bins.
    self.pv : np.array
        Vector defining the probability mass in each voltage bin (self.pv.sum() = 1).
    self.firing_rate_record : list
        List of firing rates recorded during Simulation.
    self.t_record : list
        List of times that firing rates were recorded during Simulation.
    self.leak_flux_matrix_dict : dict
        Matrices ('dense' and 'sparse' forms) defining the leak flux between voltage bins.
    """

    def __init__(self, rank=0,
                 tau_m={'distribution': 'delta', 'loc': 0.02},
                 v_min=-.1,
                 v_max=.02,
                 dv=.0001,
                 record=True,
                 initial_firing_rate=0.0,
                 update_method='approx',
                 approx_order=None,
                 tol=1e-14,
                 norm=np.inf,
                 p0={'distribution': 'delta', 'loc': 0.},
                 metadata=None,
                 firing_rate_record=[],
                 t_record=[],
                 update_callback=lambda s: None,
                 initialize_callback=lambda s: None,
                 **kwargs):

        # Store away inputs:
        self.rank = rank  # BUG FIX: was hard-coded to 0, silently ignoring the argument.
        self.tau_m = tau_m
        self.p0 = p0
        self.v_min = v_min
        self.v_max = v_max
        self.dv = dv
        self.record = record
        self.curr_firing_rate = initial_firing_rate
        self.update_method = update_method
        self.approx_order = approx_order
        self.norm = norm
        self.update_callback = update_callback
        self.initialize_callback = initialize_callback

        # Defensive copies: the list defaults above are never mutated, because
        # fresh lists are created here on every call.
        self.firing_rate_record = [x for x in firing_rate_record]
        self.t_record = [x for x in t_record]
        assert len(self.firing_rate_record) == len(self.t_record)

        # Per-method default tolerance when none was given:
        if tol is None:
            if self.update_method == 'gmres':
                self.tol = 1e-5
            else:
                self.tol = 1e-12
        else:
            self.tol = tol

        # Additional metadata (mutable-default fix: a fresh dict per instance):
        if metadata is None:
            metadata = {}
        util.check_metadata(metadata)
        self.metadata = metadata

        # Defined in initialization:
        self.edges = None
        self.pv = None
        self.leak_flux_matrix = None

        # Only serialization bookkeeping keys are tolerated as extras:
        for key in kwargs.keys():
            assert key in ['class', 'module']

    def initialize(self):
        '''Initialize the population at the beginning of a simulation.

        In turn, this method:

        1) Initializes the voltage edges (self.edges) and probability mass in each bin (self.pv),
        2) Creates an initial dictionary of inputs into the population, and
        3) Resets the recorder that tracks firing rate during a simulation.

        This method is called by the Simulation object (initialization method),
        but can also be called by a user when defining an alternative time
        stepping loop.
        '''
        self.initialize_edges()
        self.initialize_probability()  # TODO: different initialization options
        if self.record:
            self.initialize_firing_rate_recorder()
        self.initialize_callback(self)

    def update(self):
        '''Update the population one time step.

        This method is called by the Simulation object to update the population
        one time step. In turn, this method:

        1) Calls the update_total_input_dict method to gather the current strengths of presynaptic input populations,
        2) Calls the update_propability_mass method to propagate self.pv one time-step,
        3) Calls the update_firing_rate method to compute the firing rate of the population based on flux over threshold, and
        4) Calls the update_firing_rate_recorder method to register the current firing rate with the recorder.
        '''
        self.update_total_input_dict()
        self.update_propability_mass()
        self.update_firing_rate()
        if self.record:
            self.update_firing_rate_recorder()
        logger.debug('GID(%s) Firing rate: %3.2f' % (self.gid, self.curr_firing_rate))
        self.update_callback(self)

    def initialize_edges(self):
        '''Initialize self.edges and self.leak_flux_matrix_dict attributes.

        This method initializes the self.edges attribute based on the v_min,
        v_max, and dv settings, and creates a corresponding leak flux matrix
        based on this voltage discretization.  Requires self.simulation to be
        attached (self.simulation.dt is used for the backward-Euler form).
        '''
        # Voltage edges and leak matrix construction
        self.tau_m = util.discretize_if_needed(self.tau_m)
        if np.sum(self.tau_m.xk <= 0) > 0:
            raise Exception('Negative tau_m values detected: %s' % self.tau_m.xk)  # pragma: no cover

        self.edges = util.get_v_edges(self.v_min, self.v_max, self.dv)

        # Different leak matrices for different solvers:
        self.leak_flux_matrix_dict = {}
        self.leak_flux_matrix_dict['dense'] = util.leak_matrix(self.edges, self.tau_m)

        # Backward Euler sparse form, stored as (row, col, value) triplets:
        lfm_csrbe = sps.eye(np.shape(self.leak_flux_matrix_dict['dense'])[0], format='csr') - self.simulation.dt * self.leak_flux_matrix_dict['dense']
        M_I, M_J = np.where(np.array(lfm_csrbe) != 0)
        M_val = lfm_csrbe[M_I, M_J]
        self.leak_flux_matrix_dict['sparse'] = (M_I, M_J, M_val)

    def initialize_probability(self):
        '''Initialize self.pv from the p0 distribution (default: delta at v=0).'''
        self.p0 = util.discretize_if_needed(self.p0)
        self.pv = util.get_pv_from_p0(self.p0, self.edges)
        util.assert_probability_mass_conserved(self.pv, 1e-15)

    def initialize_firing_rate_recorder(self):
        '''Initialize recorder at the beginning of a simulation.

        This method is typically called by the initialize method rather than on
        its own. It resets the lists that track the firing rate during
        execution of the simulation.
        '''
        # Seed the records only if they are empty (restarted simulations keep history):
        if len(self.firing_rate_record) == 0:
            self.firing_rate_record.append(self.curr_firing_rate)
        if len(self.t_record) == 0:
            self.t_record.append(self.simulation.t)

    def initialize_total_input_dict(self):
        '''Initialize dictionary of presynaptic inputs at beginning of simulation.

        This method is typically called during simulation setup rather than on
        its own. It creates a dictionary of synaptic inputs to the population,
        keyed by connection distribution.
        '''
        # Aggregate input for each connection distribution:
        self.total_input_dict = {}
        for c in self.source_connection_list:
            try:
                curr_input = self.total_input_dict.setdefault(c.connection_distribution, 0)
            except Exception:
                # Connection distribution is created lazily; build it and retry.
                c.initialize_connection_distribution()
                curr_input = self.total_input_dict.setdefault(c.connection_distribution, 0)
            self.total_input_dict[c.connection_distribution] = curr_input + c.curr_delayed_firing_rate * c.nsyn

    def get_total_flux_matrix(self):
        '''Create a total flux matrix by summing presynaptic inputs and the leak matrix.'''
        total_flux_matrix = self.leak_flux_matrix_dict['dense'].copy()
        for key, val in self.total_input_dict.items():
            try:
                total_flux_matrix += key.flux_matrix_dict['dense'] * val
            except Exception:
                # Flux matrices are created lazily; initialize and retry.
                key.initialize()
                total_flux_matrix += key.flux_matrix_dict['dense'] * val
        return total_flux_matrix

    def update_total_input_dict(self):
        '''Update the input dictionary based on the current firing rates of presynaptic populations.'''
        # Initialize to zero:
        for curr_connection_distribution in self.total_input_dict.keys():
            self.total_input_dict[curr_connection_distribution] = 0
        for c in self.source_connection_list:
            self.total_input_dict[c.connection_distribution] += c.curr_delayed_firing_rate * c.nsyn

    def update_propability_mass(self):
        """Create a total flux matrix, and propagate self.pv one time-step.

        NOTE: the misspelling ('propability') is preserved because callers
        depend on this method name.
        """
        if self.update_method == 'exact':
            J = self.get_total_flux_matrix()
            self.pv = util.exact_update_method(J, self.pv, dt=self.simulation.dt)

        elif self.update_method == 'approx':
            J = self.get_total_flux_matrix()
            if self.approx_order is None:
                self.pv = util.approx_update_method_tol(J, self.pv, tol=self.tol, dt=self.simulation.dt, norm=self.norm)
            else:
                self.pv = util.approx_update_method_order(J, self.pv, approx_order=self.approx_order, dt=self.simulation.dt)

        elif self.update_method == 'gmres':
            self.update_propability_mass_backward_euler(lambda J, x0: spsla.gmres(J, x0, x0=x0, tol=self.tol)[0])

        else:
            raise Exception('Unrecognized population update method: "%s"' % self.update_method)  # pragma: no cover

        # Guard against numerical drift: clip negative mass and renormalize.
        if len(np.where(self.pv < 0)[0]) != 0 or np.abs(np.abs(self.pv).sum() - 1) > 1e-15:
            self.pv[np.where(self.pv < 0)] = 0
            self.pv /= self.pv.sum()
            logger.critical('Normalizing Probability Mass')

    def update_propability_mass_backward_euler(self, solver):
        '''Assemble the sparse backward-Euler system and solve it with ``solver``.'''
        # Determine size of sparse array to construct:
        total_size = len(self.leak_flux_matrix_dict['sparse'][0])
        for key, val in self.total_input_dict.items():
            try:
                total_size += len(key.flux_matrix_dict['sparse'][0])
            except Exception:
                # Flux matrices are created lazily; initialize and retry.
                key.initialize()
                total_size += len(key.flux_matrix_dict['sparse'][0])

        M_I_total = np.empty(total_size)
        M_J_total = np.empty(total_size)
        M_val_total = np.empty(total_size)

        # Leak term first:
        start_ind = 0
        end_ind = len(self.leak_flux_matrix_dict['sparse'][0])
        M_I_total[start_ind:end_ind] = self.leak_flux_matrix_dict['sparse'][0]
        M_J_total[start_ind:end_ind] = self.leak_flux_matrix_dict['sparse'][1]
        M_val_total[start_ind:end_ind] = self.leak_flux_matrix_dict['sparse'][2]

        # Then each synaptic input term, scaled by input strength and dt:
        for key, val in self.total_input_dict.items():
            start_ind = end_ind
            end_ind += len(key.flux_matrix_dict['sparse'][0])
            M_I_total[start_ind:end_ind] = key.flux_matrix_dict['sparse'][0]
            M_J_total[start_ind:end_ind] = key.flux_matrix_dict['sparse'][1]
            M_val_total[start_ind:end_ind] = -key.flux_matrix_dict['sparse'][2] * (val * self.simulation.dt)

        J = sps.coo_matrix((M_val_total, (M_I_total, M_J_total)), shape=(len(self.pv), len(self.pv)))
        self.pv = solver(J, self.pv)

    def update_firing_rate(self):
        '''Update curr_firing_rate attribute based on the total flux of probability mass across threshold.'''
        # Compute flux:
        flux_list = [key.threshold_flux_vector * val for key, val in self.total_input_dict.items()]
        if len(flux_list) > 0:
            # BUG FIX: the original used the ``reduce`` builtin, which does not
            # exist in Python 3; elementwise summation is equivalent.
            flux_vector = sum(flux_list)
            self.curr_firing_rate = np.dot(flux_vector, self.pv)
        else:
            self.curr_firing_rate = 0.  # pragma: no cover

    def update_firing_rate_recorder(self):
        '''Record current time and firing rate, if record==True.

        This method is called once per time step. If record is True, calling
        this method will append the current time and firing rate to the firing
        rate recorder.
        '''
        self.firing_rate_record.append(self.curr_firing_rate)
        self.t_record.append(self.simulation.t)

    @property
    def source_connection_list(self):
        '''List of all connections that are a source for this population.'''
        return [c for c in self.simulation.connection_list if c.target == self]

    @property
    def n_bins(self):
        '''Number of probability mass bins.'''
        return len(self.edges) - 1

    @property
    def n_edges(self):
        '''Number of probability mass bin edges.'''
        return len(self.edges)

    @property
    def gid(self):
        '''Global identifier of this population within the simulation.'''
        return self.simulation.gid_dict[self]

    def plot_probability_distribution(self, ax=None, xlim=None, ylim=None, xlabel='Voltage (Volts)', ylabel='PMF[Voltage]', title=None, **kwargs):
        '''Convenience method to plot voltage distribution.

        Parameters
        ----------
        ax : None or matplotlib.pyplot.axes object (default=None)
            Axes object to plot distribution on.  If None specified, a figure and axes object will be created.
        '''
        import matplotlib.pyplot as plt

        show = kwargs.pop('show', False)

        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)

        ax.plot(self.edges[:-1], self.pv, **kwargs)
        if ylim is None:
            ax.set_ylim(bottom=0, top=np.max(self.pv) * 1.1)
        else:
            ax.set_ylim(ylim)
        if xlim is None:
            ax.set_xlim(self.edges[:-1][0], self.edges[:-1][-1])
        else:
            ax.set_xlim(xlim)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        if title is not None:
            ax.set_title(title)

        if show:
            plt.show()

        return ax

    def plot(self, ax=None, xlim=None, ylim=None, xlabel='Time (Seconds)', ylabel='Firing Rate (Hz)', title=None, **kwargs):
        '''Convenience method to plot firing rate history.

        Parameters
        ----------
        ax : None or matplotlib.pyplot.axes object (default=None)
            Axes object to plot distribution on.  If None specified, a figure and axes object will be created.
        '''
        import matplotlib.pyplot as plt

        show = kwargs.pop('show', False)

        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)

        if self.firing_rate_record is None or self.t_record is None:
            raise RuntimeError('Firing rate not recorded on gid: %s' % self.gid)  # pragma: no cover

        ax.plot(self.t_record, self.firing_rate_record, **kwargs)
        if ylim is None:
            ax.set_ylim(bottom=0, top=np.max(self.firing_rate_record) * 1.1)
        else:
            ax.set_ylim(ylim)
        if xlim is None:
            ax.set_xlim(self.t_record[0], self.t_record[-1])
        else:
            ax.set_xlim(xlim)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        if title is not None:
            ax.set_title(title)

        if show:
            plt.show()

        return ax

    def get_firing_rate(self, t):
        '''Convenience function to get the firing rate at time(s) "t" after simulation.

        ``t`` may be a scalar or an iterable of times; a scalar returns a
        scalar, an iterable returns a list.
        '''
        try:
            ind_list = [bisect.bisect_left(self.t_record, curr_t) for curr_t in t]
            return [self.firing_rate_record[ind] for ind in ind_list]
        except TypeError:
            # Scalar t is not iterable; fall through to single-value lookup.
            return self.firing_rate_record[bisect.bisect_left(self.t_record, t)]

    def to_dict(self):
        '''Serialize the population configuration and state to a plain dict.'''
        # Only needed if population not yet initialized:
        if self.edges is None:
            p0 = self.p0
        else:
            p0 = (self.edges.tolist(), self.pv.tolist())

        # BUG FIX: was ``len(...) is 0`` (identity comparison on an int).
        if len(self.firing_rate_record) == 0:
            initial_firing_rate = 0
        else:
            initial_firing_rate = self.firing_rate_record[-1]

        try:
            tau_m = self.tau_m.xk.tolist(), self.tau_m.pk.tolist()
        except AttributeError:
            # tau_m has not been discretized yet; serialize it as given.
            tau_m = self.tau_m

        data_dict = {'rank': self.rank,
                     'p0': p0,
                     'norm': self.norm,
                     'tau_m': tau_m,
                     'v_min': self.v_min,
                     'v_max': self.v_max,
                     'dv': self.dv,
                     'record': self.record,
                     'initial_firing_rate': initial_firing_rate,
                     'update_method': self.update_method,
                     'approx_order': self.approx_order,
                     'tol': self.tol,
                     'metadata': self.metadata,
                     'module': __name__,
                     'class': self.__class__.__name__,
                     'firing_rate_record': self.firing_rate_record,
                     't_record': self.t_record}

        return data_dict

    def to_json(self, fh=None, **kwargs):
        '''Save the contents of the InternalPopulation to json.'''
        indent = kwargs.pop('indent', 2)
        data_dict = self.to_dict()
        if fh is None:
            return json.dumps(data_dict, indent=indent, **kwargs)
        else:
            return json.dump(data_dict, fh, indent=indent, **kwargs)

    def copy(self):
        '''Return a new InternalPopulation built from this one's serialized state.'''
        return InternalPopulation(**self.to_dict())

    def to_df(self):
        '''Return a pandas DataFrame view of this population.'''
        return to_df(self)

    def initialize_delay_queue(self, max_delay_ind):
        '''Return a delay queue of length max_delay_ind+1 filled with the current firing rate.'''
        # BUG FIX: np.core.numeric.ones is a private/deprecated alias of np.ones.
        return np.ones(max_delay_ind + 1) * self.simulation.get_curr_firing_rate(self.gid)

    def shutdown(self):
        '''Hook called at the end of a simulation; nothing to clean up here.'''
        pass
| nicain/dipde_dev | dipde/internals/internalpopulation.py | Python | gpl-3.0 | 20,747 | 0.010604 |
# -*- coding: utf-8 -*-
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide an interface for growth experiments."""
from __future__ import absolute_import
import logging
from pandas import DataFrame
from memote.experimental.experiment import Experiment
__all__ = ("GrowthExperiment",)
LOGGER = logging.getLogger(__name__)
class GrowthExperiment(Experiment):
    """Represent a growth experiment."""

    SCHEMA = "growth.json"

    def __init__(self, **kwargs):
        """
        Initialize a growth experiment.

        Parameters
        ----------
        kwargs

        """
        super(GrowthExperiment, self).__init__(**kwargs)

    def load(self, dtype_conversion=None):
        """
        Load the data table and corresponding validation schema.

        Parameters
        ----------
        dtype_conversion : dict
            Column names as keys and corresponding type for loading the data.
            Please take a look at the `pandas documentation
            <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
            for detailed explanations.

        """
        if dtype_conversion is None:
            dtype_conversion = {"growth": str}
        super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion)
        # Normalize the growth column to booleans.
        self.data["growth"] = self.data["growth"].isin(self.TRUTHY)

    def evaluate(self, model):
        """Evaluate in silico growth rates."""
        with model:
            if self.medium is not None:
                self.medium.apply(model)
            if self.objective is not None:
                model.objective = self.objective
            model.add_cons_vars(self.constraints)
            outcomes = list()
            for row in self.data.itertuples(index=False):
                # Apply each exchange perturbation in its own nested context so
                # the bound change is rolled back before the next row.
                with model:
                    exchange = model.reactions.get_by_id(row.exchange)
                    if exchange.reactants:
                        exchange.lower_bound = -row.uptake
                    else:
                        exchange.upper_bound = row.uptake
                    outcomes.append(model.slim_optimize() >= self.minimal_growth_rate)
        return DataFrame({"exchange": self.data["exchange"], "growth": outcomes})
| opencobra/memote | src/memote/experimental/growth.py | Python | apache-2.0 | 2,835 | 0.000353 |
# -*- coding: utf-8 -*-
#!/usr/bin/python
# Author: Tania M. Molina
# UY - 2017
# MIT License
import math
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import norm
import scipy.stats as stats
import scipy.stats as st
import matplotlib
import matplotlib.pyplot as plt
import re
import scipy.stats
import matplotlib.pyplot as mlab
# Prompt interactively for the CSV to analyze (Python 2 script: raw_input returns a str).
fhand = raw_input('Enter .csv file name or keyword: ')
# Parse the CSV, treating the first row as the column header.
data = pd.read_csv(fhand, header=0)
# NOTE(review): read_csv already returns a DataFrame, so this re-wrap looks
# redundant -- confirm nothing downstream relies on a distinct object before removing.
frame = pd.DataFrame(data)
| lastralab/Statistics | Specialization/Personal/Rtopy.py | Python | mit | 484 | 0.002066 |
import os
import unittest
import numpy as np
from tfsnippet.examples.utils import MLResults
from tfsnippet.utils import TemporaryDirectory
def head_of_file(path, n):
    """Return the first ``n`` bytes of the file at ``path``."""
    with open(path, 'rb') as stream:
        return stream.read(n)
class MLResultTestCase(unittest.TestCase):

    def test_imwrite(self):
        """Images saved via MLResults should carry their format's magic bytes."""
        # (extension, expected magic-number prefix) in the same order as before.
        format_magic = [
            ('bmp', b'\x42\x4d'),
            ('png', b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'),
            ('jpg', b'\xff\xd8\xff'),
        ]
        with TemporaryDirectory() as tmpdir:
            results = MLResults(tmpdir)
            # Half-black, half-white 32x32 test image.
            im = np.zeros([32, 32], dtype=np.uint8)
            im[16:, ...] = 255
            for ext, magic in format_magic:
                name = 'test.' + ext
                results.save_image(name, im)
                file_path = os.path.join(tmpdir, name)
                self.assertTrue(os.path.isfile(file_path))
                self.assertEqual(head_of_file(file_path, len(magic)), magic)
| korepwx/tfsnippet | tests/examples/utils/test_mlresult.py | Python | mit | 1,216 | 0 |
"""
Tests that ensure the correct items are returned from api calls.
Also tests whether the users with permissions can create logs.
"""
import csv
import datetime
import uuid
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from kolibri.auth.models import DeviceOwner
from .factory_logger import (
FacilityFactory, FacilityUserFactory,
ContentSessionLogFactory, ContentSummaryLogFactory,
ContentRatingLogFactory, UserSessionLogFactory,
DUMMY_PASSWORD
)
from ..models import ContentSessionLog, ContentSummaryLog, ContentRatingLog, UserSessionLog
from ..serializers import ContentSessionLogSerializer, ContentSummaryLogSerializer, ContentRatingLogSerializer
class ContentSessionLogAPITestCase(APITestCase):
    """API tests for listing, retrieving and creating ContentSessionLogs."""

    def setUp(self):
        # create DeviceOwner to pass the setup_wizard middleware check
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        self.facility = FacilityFactory.create()
        self.admin = FacilityUserFactory.create(facility=self.facility)
        self.user = FacilityUserFactory.create(facility=self.facility)
        # Three pre-existing logs owned by the learner, used by the list/detail tests.
        self.interaction_logs = [ContentSessionLogFactory.create(user=self.user) for _ in range(3)]
        self.facility.add_admin(self.admin)
        # Minimal valid payload for the create (POST) tests.
        self.payload = {'user': self.user.pk,
                        'content_id': uuid.uuid4().hex,
                        'channel_id': uuid.uuid4().hex,
                        'kind': 'video',
                        'start_timestamp': str(datetime.datetime.now())}

    def test_contentsessionlog_list(self):
        # Admin should see every session log in the list endpoint.
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.get(reverse('contentsessionlog-list'))
        expected_count = ContentSessionLog.objects.count()
        self.assertEqual(len(response.data), expected_count)

    def test_contentsessionlog_detail(self):
        # Detail endpoint should match the serializer output for the same log.
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        log_id = self.interaction_logs[0].id
        response = self.client.get(reverse('contentsessionlog-detail', kwargs={"pk": log_id}))
        log = ContentSessionLog.objects.get(pk=log_id)
        interaction_serializer = ContentSessionLogSerializer(log)
        self.assertEqual(response.data['content_id'], interaction_serializer.data['content_id'])

    def test_admin_can_create_contentsessionlog(self):
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.post(reverse('contentsessionlog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_learner_can_create_contentsessionlog(self):
        self.client.login(username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.post(reverse('contentsessionlog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_anonymous_user_cannot_create_contentsessionlog_for_learner(self):
        # No login: posting a payload that names a user must be rejected.
        response = self.client.post(reverse('contentsessionlog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_anonymous_user_can_create_contentsessionlog(self):
        # Anonymous logs (no user key) are allowed.
        del self.payload['user']
        response = self.client.post(reverse('contentsessionlog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class ContentSummaryLogAPITestCase(APITestCase):
    """API tests for listing, retrieving and creating ContentSummaryLogs."""

    def setUp(self):
        # create DeviceOwner to pass the setup_wizard middleware check
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        self.facility = FacilityFactory.create()
        self.admin = FacilityUserFactory.create(facility=self.facility)
        self.user = FacilityUserFactory.create(facility=self.facility)
        # Three pre-existing logs owned by the learner, used by the list/detail tests.
        self.summary_logs = [ContentSummaryLogFactory.create(user=self.user) for _ in range(3)]
        self.facility.add_admin(self.admin)
        # Minimal valid payload for the create (POST) tests.
        self.payload = {'user': self.user.pk,
                        'content_id': uuid.uuid4().hex,
                        'channel_id': uuid.uuid4().hex,
                        'kind': "video",
                        'start_timestamp': str(datetime.datetime.now())}

    def test_summarylog_list(self):
        # Admin should see every summary log in the list endpoint.
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.get(reverse('contentsummarylog-list'))
        expected_count = ContentSummaryLog.objects.count()
        self.assertEqual(len(response.data), expected_count)

    def test_summarylog_detail(self):
        # Detail endpoint should match the serializer output for the same log.
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        log_id = self.summary_logs[0].id
        response = self.client.get(reverse('contentsummarylog-detail', kwargs={"pk": log_id}))
        log = ContentSummaryLog.objects.get(pk=log_id)
        summary_serializer = ContentSummaryLogSerializer(log)
        self.assertEqual(response.data['content_id'], summary_serializer.data['content_id'])

    def test_admin_can_create_summarylog(self):
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.post(reverse('contentsummarylog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_learner_can_create_summarylog(self):
        self.client.login(username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.post(reverse('contentsummarylog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_anonymous_user_cannot_create_summarylog_for_learner(self):
        # No login: posting a payload that names a user must be rejected.
        response = self.client.post(reverse('contentsummarylog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class ContentRatingLogAPITestCase(APITestCase):
    """API tests for listing, retrieving and creating ContentRatingLogs."""

    def setUp(self):
        # create DeviceOwner to pass the setup_wizard middleware check
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        self.facility = FacilityFactory.create()
        self.admin = FacilityUserFactory.create(facility=self.facility)
        self.user = FacilityUserFactory.create(facility=self.facility)
        # Three pre-existing logs owned by the learner, used by the list/detail tests.
        self.rating_logs = [ContentRatingLogFactory.create(user=self.user) for _ in range(3)]
        self.facility.add_admin(self.admin)
        # Minimal valid payload for the create (POST) tests.
        self.payload = {'user': self.user.pk,
                        'content_id': uuid.uuid4().hex,
                        'channel_id': uuid.uuid4().hex}

    def test_ratinglog_list(self):
        # Admin should see every rating log in the list endpoint.
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.get(reverse('contentratinglog-list'))
        expected_count = ContentRatingLog.objects.count()
        self.assertEqual(len(response.data), expected_count)

    def test_ratinglog_detail(self):
        # Detail endpoint should match the serializer output for the same log.
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        log_id = self.rating_logs[0].id
        response = self.client.get(reverse('contentratinglog-detail', kwargs={"pk": log_id}))
        log = ContentRatingLog.objects.get(pk=log_id)
        rating_serializer = ContentRatingLogSerializer(log)
        self.assertEqual(response.data['content_id'], rating_serializer.data['content_id'])

    def test_admin_can_create_ratinglog(self):
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.post(reverse('contentratinglog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_learner_can_create_ratinglog(self):
        self.client.login(username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.post(reverse('contentratinglog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_anonymous_user_cannot_create_ratinglog_for_learner(self):
        # No login: posting a payload that names a user must be rejected.
        response = self.client.post(reverse('contentratinglog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_anonymous_user_can_create_ratinglog(self):
        # Anonymous logs (no user key) are allowed.
        del self.payload['user']
        response = self.client.post(reverse('contentratinglog-list'), data=self.payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class UserSessionLogAPITestCase(APITestCase):
    """API tests for listing, retrieving and creating UserSessionLogs."""

    def setUp(self):
        # create DeviceOwner to pass the setup_wizard middleware check
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        self.facility = FacilityFactory.create()
        self.admin = FacilityUserFactory.create(facility=self.facility)
        self.user = FacilityUserFactory.create(facility=self.facility)
        # Three pre-existing logs owned by the learner, used by the list/detail tests.
        self.session_logs = [UserSessionLogFactory.create(user=self.user) for _ in range(3)]
        self.facility.add_admin(self.admin)

    def test_sessionlog_list(self):
        # Admin should see every user session log in the list endpoint.
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.get(reverse('usersessionlog-list'))
        expected_count = UserSessionLog.objects.count()
        self.assertEqual(len(response.data), expected_count)

    def test_sessionlog_detail(self):
        # Detail endpoint should report the owning user of the log.
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        log_id = self.session_logs[0].id
        response = self.client.get(reverse('usersessionlog-detail', kwargs={"pk": log_id}))
        log = UserSessionLog.objects.get(pk=log_id)
        self.assertEqual(response.data['user'], log.user.id)

    def test_admin_can_create_sessionlog(self):
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.post(reverse('usersessionlog-list'), data={'user': self.user.pk}, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_learner_can_create_sessionlog(self):
        self.client.login(username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility)
        response = self.client.post(reverse('usersessionlog-list'), data={'user': self.user.pk}, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_anonymous_user_cannot_create_sessionlog_for_learner(self):
        # No login: posting a payload that names a user must be rejected.
        response = self.client.post(reverse('usersessionlog-list'), data={'user': self.user.pk}, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class ContentSummaryLogCSVExportTestCase(APITestCase):
    """Checks the CSV export endpoint for content summary logs."""

    def setUp(self):
        # A DeviceOwner must exist to satisfy the setup_wizard middleware check.
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        self.facility = FacilityFactory.create()
        self.admin = FacilityUserFactory.create(facility=self.facility)
        self.user = FacilityUserFactory.create(facility=self.facility)
        self.summary_logs = [ContentSummaryLogFactory.create(user=self.user) for _ in range(3)]
        self.facility.add_admin(self.admin)

    def test_csv_download(self):
        self.client.login(
            username=self.admin.username,
            password=DUMMY_PASSWORD,
            facility=self.facility,
        )
        expected_count = ContentSummaryLog.objects.count()
        response = self.client.get(reverse('contentsummarylogcsv-list'))
        non_empty_lines = (row for row in response.content.decode("utf-8").split("\n") if row)
        results = list(csv.reader(non_empty_lines))
        header, body = results[0], results[1:]
        # Every data row must have exactly as many columns as the header.
        for row in body:
            self.assertEqual(len(header), len(row))
        self.assertEqual(len(body), expected_count)
class ContentSessionLogCSVExportTestCase(APITestCase):
    """Checks the CSV export endpoint for content session logs."""

    def setUp(self):
        # A DeviceOwner must exist to satisfy the setup_wizard middleware check.
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        self.facility = FacilityFactory.create()
        self.admin = FacilityUserFactory.create(facility=self.facility)
        self.user = FacilityUserFactory.create(facility=self.facility)
        self.interaction_logs = [ContentSessionLogFactory.create(user=self.user) for _ in range(3)]
        self.facility.add_admin(self.admin)

    def test_csv_download(self):
        self.client.login(
            username=self.admin.username,
            password=DUMMY_PASSWORD,
            facility=self.facility,
        )
        expected_count = ContentSessionLog.objects.count()
        response = self.client.get(reverse('contentsessionlogcsv-list'))
        non_empty_lines = (row for row in response.content.decode("utf-8").split("\n") if row)
        results = list(csv.reader(non_empty_lines))
        header, body = results[0], results[1:]
        # Every data row must have exactly as many columns as the header.
        for row in body:
            self.assertEqual(len(header), len(row))
        self.assertEqual(len(body), expected_count)
| 66eli77/kolibri | kolibri/logger/test/test_api.py | Python | mit | 13,270 | 0.003693 |
# -*- coding: utf-8 -*-
"""
Django settings for {{cookiecutter.project_name}} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this; the S3 import is attempted even in Dev, where it is not used.
pass
from configurations import Configuration, values
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Common(Configuration):
    """
    Base django-configurations class with settings shared by every
    environment; ``Local`` and ``Production`` subclass and override it.
    """
    # APP CONFIGURATION
    DJANGO_APPS = (
        # Default Django apps:
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
        'django.contrib.messages',
        'django.contrib.staticfiles',
        # Useful template tags:
        # 'django.contrib.humanize',
        # Admin
        'django.contrib.admin',
    )
    THIRD_PARTY_APPS = (
        'south', # Database migration helpers:
        'crispy_forms', # Form layouts
        'avatar', # for user avatars
        'django_extensions', # useful django extensions
    )
    # Apps specific for this project go here.
    LOCAL_APPS = (
        'users', # custom users app
        # Your stuff: custom apps go here
    )
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
    INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
    INSTALLED_APPS += (
        # Needs to come last for now because of a weird edge case between
        # South and allauth
        'allauth', # registration
        'allauth.account', # registration
        'allauth.socialaccount', # registration
    )
    # END APP CONFIGURATION
    # MIDDLEWARE CONFIGURATION
    MIDDLEWARE_CLASSES = (
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
    )
    # END MIDDLEWARE CONFIGURATION
    # DEBUG
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
    DEBUG = values.BooleanValue(False)
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
    TEMPLATE_DEBUG = DEBUG
    # END DEBUG
    # SECRET CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
    # Note: This key only used for development and testing.
    # In production, this is changed to a values.SecretValue() setting
    SECRET_KEY = "CHANGEME!!!"
    # END SECRET CONFIGURATION
    # FIXTURE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
    FIXTURE_DIRS = (
        join(BASE_DIR, 'fixtures'),
    )
    # END FIXTURE CONFIGURATION
    # EMAIL CONFIGURATION
    EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
    # END EMAIL CONFIGURATION
    # MANAGER CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
    ADMINS = (
        ('{{cookiecutter.author_name}}', '{{cookiecutter.email}}'),
    )
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
    MANAGERS = ADMINS
    # END MANAGER CONFIGURATION
    # DATABASE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
    DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.repo_name}}')
    # END DATABASE CONFIGURATION
    # CACHING
    # Do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on windows.
    # memcacheify is what's used in Production
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': ''
        }
    }
    # END CACHING
    # GENERAL CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
    TIME_ZONE = 'America/Los_Angeles'
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
    LANGUAGE_CODE = 'en-us'
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
    SITE_ID = 1
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
    USE_I18N = True
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
    USE_L10N = True
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
    USE_TZ = True
    # END GENERAL CONFIGURATION
    # TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
    TEMPLATE_CONTEXT_PROCESSORS = (
        'django.contrib.auth.context_processors.auth',
        "allauth.account.context_processors.account",
        "allauth.socialaccount.context_processors.socialaccount",
        'django.core.context_processors.debug',
        'django.core.context_processors.i18n',
        'django.core.context_processors.media',
        'django.core.context_processors.static',
        'django.core.context_processors.tz',
        'django.contrib.messages.context_processors.messages',
        'django.core.context_processors.request',
        # Your stuff: custom template context processors go here
    )
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_DIRS = (
        join(BASE_DIR, 'templates'),
    )
    TEMPLATE_LOADERS = (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )
    # See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
    CRISPY_TEMPLATE_PACK = 'bootstrap3'
    # END TEMPLATE CONFIGURATION
    # STATIC FILE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
    STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = '/static/'
    # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
    STATICFILES_DIRS = (
        join(BASE_DIR, 'static'),
    )
    # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
    STATICFILES_FINDERS = (
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    )
    # END STATIC FILE CONFIGURATION
    # MEDIA CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
    MEDIA_ROOT = join(BASE_DIR, 'media')
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
    MEDIA_URL = '/media/'
    # END MEDIA CONFIGURATION
    # URL Configuration
    ROOT_URLCONF = 'config.urls'
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
    WSGI_APPLICATION = 'config.wsgi.application'
    # End URL Configuration
    # AUTHENTICATION CONFIGURATION
    AUTHENTICATION_BACKENDS = (
        "django.contrib.auth.backends.ModelBackend",
        "allauth.account.auth_backends.AuthenticationBackend",
    )
    # Some really nice defaults
    ACCOUNT_AUTHENTICATION_METHOD = "username"
    ACCOUNT_EMAIL_REQUIRED = True
    ACCOUNT_EMAIL_VERIFICATION = "mandatory"
    # END AUTHENTICATION CONFIGURATION
    # Custom user app defaults
    # Select the correct user model
    AUTH_USER_MODEL = "users.User"
    LOGIN_REDIRECT_URL = "users:redirect"
    LOGIN_URL = "account_login"
    # END Custom user app defaults
    # SLUGLIFIER
    AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
    # END SLUGLIFIER
    # LOGGING CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
    # A sample logging configuration. The only tangible logging
    # performed by this configuration is to send an email to
    # the site admins on every HTTP 500 error when DEBUG=False.
    # See http://docs.djangoproject.com/en/dev/topics/logging for
    # more details on how to customize your logging configuration.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse'
            }
        },
        'handlers': {
            'mail_admins': {
                'level': 'ERROR',
                'filters': ['require_debug_false'],
                'class': 'django.utils.log.AdminEmailHandler'
            }
        },
        'loggers': {
            'django.request': {
                'handlers': ['mail_admins'],
                'level': 'ERROR',
                'propagate': True,
            },
        }
    }
    # END LOGGING CONFIGURATION
    # CELERY CONFIGURATION
    # See: http://docs.celeryproject.org/en/latest/configuration.html#broker-transport
    BROKER_TRANSPORT = 'amqp'
    # See: http://docs.celeryproject.org/en/latest/configuration.html#broker-pool-limit
    # and https://groups.google.com/forum/#!topic/celery-users/JWnh2LFux9o
    BROKER_POOL_LIMIT = 4
    # See: http://docs.celeryproject.org/en/latest/configuration.html#celery-redirect-stdouts-level
    CELERY_REDIRECT_STDOUTS_LEVEL = 'DEBUG'
    # See: http://docs.celeryproject.org/en/latest/configuration.html#broker-url
    BROKER_URL = values.Value(environ_name='RABBITMQ_URL', environ_prefix=None)
    # See: http://docs.celeryproject.org/en/latest/configuration.html#celery-result-backend
    CELERY_RESULT_BACKEND = 'amqp'
    # See: http://celery.readthedocs.org/en/latest/configuration.html#celery-task-result-expires
    CELERY_TASK_RESULT_EXPIRES = 21600 # 6 hours
    # See: http://docs.celeryproject.org/en/latest/userguide/security.html#serializers
    # and: http://docs.celeryproject.org/en/latest/configuration.html#std:setting-CELERY_ACCEPT_CONTENT
    CELERY_ACCEPT_CONTENT = ['pickle', 'json']
    # END CELERY CONFIGURATION
    # Your common stuff: Below this line define 3rd party library settings
class Local(Common):
    """Development settings: DEBUG on, console email, django-debug-toolbar."""
    # DEBUG
    DEBUG = values.BooleanValue(True)
    TEMPLATE_DEBUG = DEBUG
    # END DEBUG
    # INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    # END INSTALLED_APPS
    # Mail settings
    EMAIL_HOST = "localhost"
    EMAIL_PORT = 1025
    EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
    # End mail settings
    # django-debug-toolbar
    MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
    INSTALLED_APPS += ('debug_toolbar',)
    DEBUG_TOOLBAR_PATCH_SETTINGS = False
    INTERNAL_IPS = ('127.0.0.1',)
    DEBUG_TOOLBAR_CONFIG = {
        'DISABLE_PANELS': [
            'debug_toolbar.panels.redirects.RedirectsPanel',
        ],
        'SHOW_TEMPLATE_CONTEXT': True,
    }
    # end django-debug-toolbar
    # Your local stuff: Below this line define 3rd party library settings
class Production(Common):
    """
    Production settings: SecretValue-based SECRET_KEY, django-secure HTTPS
    hardening, SMTP email (SendGrid host default) and memcached caching.
    """
    # INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    # END INSTALLED_APPS
    # SECRET KEY
    SECRET_KEY = values.SecretValue()
    # END SECRET KEY
    # django-secure
    INSTALLED_APPS += ("djangosecure", )
    # set this to 60 seconds and then to 518400 when you can prove it works
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    # end django-secure
    # SITE CONFIGURATION
    # Hosts/domain names that are valid for this site
    # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    ALLOWED_HOSTS = ["*"]
    # END SITE CONFIGURATION
    # STORAGE CONFIGURATION
    # # See: http://django-storages.readthedocs.org/en/latest/index.html
    # INSTALLED_APPS += (
    # 'storages',
    # )
    # # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    # STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
    # # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    # AWS_ACCESS_KEY_ID = values.SecretValue()
    # AWS_SECRET_ACCESS_KEY = values.SecretValue()
    # AWS_STORAGE_BUCKET_NAME = values.SecretValue()
    # AWS_AUTO_CREATE_BUCKET = True
    # AWS_QUERYSTRING_AUTH = False
    # # see: https://github.com/antonagestam/collectfast
    # AWS_PRELOAD_METADATA = True
    # INSTALLED_APPS += ("collectfast", )
    # # AWS cache settings, don't change unless you know what you're doing:
    # AWS_EXPIREY = 60 * 60 * 24 * 7
    # AWS_HEADERS = {
    # 'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY,
    # AWS_EXPIREY)
    # }
    # # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    # STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
    # END STORAGE CONFIGURATION
    # EMAIL
    DEFAULT_FROM_EMAIL = values.Value(
        '{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>')
    EMAIL_HOST = values.Value('smtp.sendgrid.com')
    # EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
    # EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
    EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
    EMAIL_SUBJECT_PREFIX = values.Value('[{{cookiecutter.project_name}}] ', environ_name="EMAIL_SUBJECT_PREFIX")
    EMAIL_USE_TLS = True
    # SERVER_EMAIL = EMAIL_HOST_USER
    # END EMAIL
    # TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    # END TEMPLATE CONFIGURATION
    # CACHING
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on windows.
    try:
        # See: https://github.com/rdegges/django-heroku-memcacheify
        from memcacheify import memcacheify
        CACHES = memcacheify()
    except ImportError:
        CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
    # END CACHING
    # Your production stuff: Below this line define 3rd party library settings
| benregn/cookiecutter-django-ansible | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/config/settings.py | Python | bsd-3-clause | 15,037 | 0.001796 |
# Copyright (c) 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic import context
from rally.common.db import api
from rally.common.db import models
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# Rally's declarative base supplies the schema that Alembic's
# 'autogenerate' diffs migrations against.
target_metadata = models.BASE.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_online():
    """Run Alembic migrations in 'online' mode.

    Obtains an engine, binds a live connection to the Alembic context and
    runs the migration scripts inside a transaction.
    """
    db_engine = api.get_engine()
    with db_engine.connect() as conn:
        context.configure(
            connection=conn,
            render_as_batch=True,
            target_metadata=target_metadata,
        )
        with context.begin_transaction():
            context.run_migrations()


run_migrations_online()
| openstack/rally | rally/common/db/migrations/env.py | Python | apache-2.0 | 1,541 | 0 |
from ctypes import c_float, cast, POINTER
import numpy as np
import OpenGL.GL as gl
import openvr
from openvr.gl_renderer import OpenVrFramebuffer as OpenVRFramebuffer
from openvr.gl_renderer import matrixForOpenVrMatrix as matrixForOpenVRMatrix
from openvr.tracked_devices_actor import TrackedDevicesActor
import gltfutils as gltfu
c_float_p = POINTER(c_float)  # pointer-to-float alias used to reinterpret OpenVR matrix memory
class OpenVRRenderer(object):
    """Renders glTF scene nodes to an OpenVR (SteamVR) headset, one framebuffer per eye."""
    def __init__(self, multisample=0, znear=0.1, zfar=1000):
        """Initialize the VR system, per-eye framebuffers and cached matrices.

        znear/zfar are the clipping planes baked into the per-eye projection
        matrices; multisample is forwarded to the framebuffers.
        """
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        # Render-target size recommended by the runtime for the attached HMD.
        w, h = self.vr_system.getRecommendedRenderTargetSize()
        self.vr_framebuffers = (OpenVRFramebuffer(w, h, multisample=multisample),
                                OpenVRFramebuffer(w, h, multisample=multisample))
        self.vr_compositor = openvr.VRCompositor()
        if self.vr_compositor is None:
            raise Exception('unable to create compositor')
        self.vr_framebuffers[0].init_gl()
        self.vr_framebuffers[1].init_gl()
        # Pre-allocated pose array, one slot per possible tracked device.
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()
        # Cache per-eye projection matrices and inverted eye-to-head transforms
        # once; they do not change per frame.
        self.projection_matrices = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Left,
                                                                                                        znear, zfar))),
                                    np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Right,
                                                                                                       znear, zfar))))
        self.eye_transforms = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
                               np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Right)).I))
        # Scratch 4x4 view matrix plus one derived matrix per eye (reused each frame).
        self.view = np.eye(4, dtype=np.float32)
        self.view_matrices = (np.empty((4,4), dtype=np.float32),
                              np.empty((4,4), dtype=np.float32))
        self.controllers = TrackedDevicesActor(self.poses)
        self.controllers.show_controllers_only = False
        self.controllers.init_gl()
        self.vr_event = openvr.VREvent_t()
    def render(self, gltf, nodes, window_size=(800, 600)):
        """Render one stereo frame of the given glTF nodes and submit it to the
        compositor; the left eye is also blitted to the desktop window."""
        self.vr_compositor.waitGetPoses(self.poses, openvr.k_unMaxTrackedDeviceCount, None, 0)
        hmd_pose = self.poses[openvr.k_unTrackedDeviceIndex_Hmd]
        if not hmd_pose.bPoseIsValid:
            return
        # Reinterpret the HMD's 3x4 device-to-absolute transform as a numpy array.
        hmd_34 = np.ctypeslib.as_array(cast(hmd_pose.mDeviceToAbsoluteTracking.m, c_float_p),
                                       shape=(3,4))
        self.view[:3,:] = hmd_34
        # NOTE(review): transpose-then-invert converts the row-major OpenVR pose
        # into the view matrix convention used by draw_node -- confirm against
        # gltfutils' expected matrix layout.
        view = np.linalg.inv(self.view.T)
        view.dot(self.eye_transforms[0], out=self.view_matrices[0])
        view.dot(self.eye_transforms[1], out=self.view_matrices[1])
        gl.glViewport(0, 0, self.vr_framebuffers[0].width, self.vr_framebuffers[0].height)
        for eye in (0, 1):
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.vr_framebuffers[eye].fb)
            gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
            # Reset the material/technique state caches before each eye's pass.
            gltfu.set_material_state.current_material = None
            gltfu.set_technique_state.current_technique = None
            for node in nodes:
                gltfu.draw_node(node, gltf,
                                projection_matrix=self.projection_matrices[eye],
                                view_matrix=self.view_matrices[eye])
            self.controllers.display_gl(self.view_matrices[eye], self.projection_matrices[eye])
        self.vr_compositor.submit(openvr.Eye_Left, self.vr_framebuffers[0].texture)
        self.vr_compositor.submit(openvr.Eye_Right, self.vr_framebuffers[1].texture)
        # mirror left eye framebuffer to screen:
        gl.glBlitNamedFramebuffer(self.vr_framebuffers[0].fb, 0,
                                  0, 0, self.vr_framebuffers[0].width, self.vr_framebuffers[0].height,
                                  0, 0, window_size[0], window_size[1],
                                  gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    def process_input(self):
        # Currently a no-op; the commented-out code below is an experimental
        # trigger-to-haptic-pulse mapping and event poll kept for reference.
        pass
        # state = self.vr_system.getControllerState(1)
        # if state and state.rAxis[1].x > 0.05:
        # self.vr_system.triggerHapticPulse(1, 0, int(3200 * state.rAxis[1].x))
        # state = self.vr_system.getControllerState(2)
        # if state and state.rAxis[1].x > 0.05:
        # self.vr_system.triggerHapticPulse(2, 0, int(3200 * state.rAxis[1].x))
        # if self.vr_system.pollNextEvent(self.vr_event):
        # if self.vr_event.eventType == openvr.VREvent_ButtonPress:
        # pass #print('vr controller button pressed')
        # elif self.vr_event.eventType == openvr.VREvent_ButtonUnpress:
        # pass #print('vr controller button unpressed')
    def shutdown(self):
        """Release controller GL resources and shut down the OpenVR runtime."""
        self.controllers.dispose_gl()
        openvr.shutdown()
| jzitelli/python-gltf-experiments | OpenVRRenderer.py | Python | mit | 4,971 | 0.004828 |
from __future__ import annotations
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Hashable,
Literal,
Mapping,
Sequence,
cast,
overload,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import lib
from pandas._libs.tslibs import (
Period,
Tick,
Timestamp,
to_offset,
)
from pandas._typing import (
Axis,
CompressionOptions,
Dtype,
DtypeArg,
DtypeObj,
FilePathOrBuffer,
FrameOrSeries,
IndexKeyFunc,
IndexLabel,
JSONSerializable,
Level,
Manager,
RandomState,
Renamer,
StorageOptions,
T,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
final,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import (
AbstractMethodError,
InvalidIndexError,
)
from pandas.util._decorators import (
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_ascending,
validate_bool_kwarg,
validate_fillna_kwargs,
)
from pandas.core.dtypes.common import (
ensure_object,
ensure_platform_int,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
arraylike,
indexing,
missing,
nanops,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import (
create_series_with_explicit_dtype,
extract_array,
)
from pandas.core.describe import describe_ndframe
from pandas.core.flags import Flags
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
MultiIndex,
PeriodIndex,
RangeIndex,
ensure_index,
)
from pandas.core.internals import (
ArrayManager,
BlockManager,
SingleArrayManager,
)
from pandas.core.internals.construction import mgr_to_mgr
from pandas.core.missing import find_valid_index
from pandas.core.ops import align_method_FRAME
from pandas.core.reshape.concat import concat
import pandas.core.sample as sample
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import get_indexer_indexer
from pandas.core.window import (
Expanding,
ExponentialMovingWindow,
Rolling,
Window,
)
from pandas.io.formats import format as fmt
from pandas.io.formats.format import (
DataFrameFormatter,
DataFrameRenderer,
)
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
from pandas._libs.tslibs import BaseOffset
from pandas.core.frame import DataFrame
from pandas.core.resample import Resampler
from pandas.core.series import Series
from pandas.core.window.indexers import BaseIndexer
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = {**_shared_docs}
_shared_doc_kwargs = {
"axes": "keywords for axes",
"klass": "Series/DataFrame",
"axes_single_arg": "int or labels for object",
"args_transpose": "axes to permute (int or label for object)",
"inplace": """
inplace : bool, default False
If True, performs operation inplace and returns None.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, indexing.IndexingMixin):
    """
    N-dimensional analogue of DataFrame. Store multi-dimensional in a
    size-mutable, labeled data structure
    Parameters
    ----------
    data : BlockManager
    axes : list
    copy : bool, default False
    """
    # Instance attribute names stored directly on the object.
    # NOTE(review): presumably consulted by the attribute-access machinery
    # (__getattr__/__setattr__) defined later in this class to bypass
    # column/label lookup -- confirm there.
    _internal_names: list[str] = [
        "_mgr",
        "_cacher",
        "_item_cache",
        "_cache",
        "_is_copy",
        "_subtyp",
        "_name",
        "_index",
        "_default_kind",
        "_default_fill_value",
        "_metadata",
        "__array_struct__",
        "__array_interface__",
        "_flags",
    ]
    _internal_names_set: set[str] = set(_internal_names)  # set form of the above for fast membership tests
    _accessors: set[str] = set()  # presumably names registered by accessor extensions -- confirm at registration site
    # Attributes excluded from introspection; the first two have deprecation
    # warnings attached below, the others are deprecated elsewhere.
    _hidden_attrs: frozenset[str] = frozenset(
        ["_AXIS_NAMES", "_AXIS_NUMBERS", "get_values", "tshift"]
    )
    _metadata: list[str] = []
    _is_copy: weakref.ReferenceType[NDFrame] | None = None
    _mgr: Manager
    _attrs: dict[Hashable, Any]
    _typ: str
    # ----------------------------------------------------------------------
    # Constructors
def __init__(
self,
data: Manager,
copy: bool_t = False,
attrs: Mapping[Hashable, Any] | None = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_mgr", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))
    @classmethod
    def _init_mgr(
        cls,
        mgr: Manager,
        axes,
        dtype: Dtype | None = None,
        copy: bool_t = False,
    ) -> Manager:
        """
        Reindex, copy and/or cast a passed Manager to match an axes dict.

        Parameters
        ----------
        mgr : Manager
        axes : dict
            Mapping of axis name -> Index, or None to leave that axis alone.
        dtype : Dtype, optional
            Target dtype to cast to; skipped when the single block already
            has that dtype.
        copy : bool, default False
            Force a copy of the manager.
        """
        for a, axe in axes.items():
            if axe is not None:
                axe = ensure_index(axe)
                bm_axis = cls._get_block_manager_axis(a)
                mgr = mgr.reindex_axis(axe, axis=bm_axis)
        # make a copy if explicitly requested
        if copy:
            mgr = mgr.copy()
        if dtype is not None:
            # avoid further copies if we can
            if (
                isinstance(mgr, BlockManager)
                and len(mgr.blocks) == 1
                and is_dtype_equal(mgr.blocks[0].values.dtype, dtype)
            ):
                pass
            else:
                mgr = mgr.astype(dtype=dtype)
        return mgr
@classmethod
def _from_mgr(cls, mgr: Manager):
"""
Fastpath to create a new DataFrame/Series from just a BlockManager/ArrayManager.
Notes
-----
Skips setting `_flags` attribute; caller is responsible for doing so.
"""
obj = cls.__new__(cls)
object.__setattr__(obj, "_is_copy", None)
object.__setattr__(obj, "_mgr", mgr)
object.__setattr__(obj, "_item_cache", {})
object.__setattr__(obj, "_attrs", {})
return obj
def _as_manager(
self: FrameOrSeries, typ: str, copy: bool_t = True
) -> FrameOrSeries:
"""
Private helper function to create a DataFrame with specific manager.
Parameters
----------
typ : {"block", "array"}
copy : bool, default True
Only controls whether the conversion from Block->ArrayManager
copies the 1D arrays (to ensure proper/contiguous memory layout).
Returns
-------
DataFrame
New DataFrame using specified manager type. Is not guaranteed
to be a copy or not.
"""
new_mgr: Manager
new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy)
# fastpath of passing a manager doesn't check the option/manager class
return self._constructor(new_mgr).__finalize__(self)
# ----------------------------------------------------------------------
# attrs and flags
@property
def attrs(self) -> dict[Hashable, Any]:
"""
Dictionary of global attributes of this dataset.
.. warning::
attrs is experimental and may change without warning.
See Also
--------
DataFrame.flags : Global flags applying to this object.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@final
@property
def flags(self) -> Flags:
"""
Get the properties associated with this pandas object.
The available flags are
* :attr:`Flags.allows_duplicate_labels`
See Also
--------
Flags : Flags that apply to pandas objects.
DataFrame.attrs : Global metadata applying to this dataset.
Notes
-----
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags
<Flags(allows_duplicate_labels=True)>
Flags can be get or set using ``.``
>>> df.flags.allows_duplicate_labels
True
>>> df.flags.allows_duplicate_labels = False
Or by slicing with a key
>>> df.flags["allows_duplicate_labels"]
False
>>> df.flags["allows_duplicate_labels"] = True
"""
return self._flags
@final
def set_flags(
self: FrameOrSeries,
*,
copy: bool_t = False,
allows_duplicate_labels: bool_t | None = None,
) -> FrameOrSeries:
"""
Return a new object with updated flags.
Parameters
----------
allows_duplicate_labels : bool, optional
Whether the returned object allows duplicate labels.
Returns
-------
Series or DataFrame
The same type as the caller.
See Also
--------
DataFrame.attrs : Global metadata applying to this dataset.
DataFrame.flags : Global flags applying to this object.
Notes
-----
This method returns a new object that's a view on the same data
as the input. Mutating the input or the output values will be reflected
in the other.
This method is intended to be used in method chains.
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags.allows_duplicate_labels
True
>>> df2 = df.set_flags(allows_duplicate_labels=False)
>>> df2.flags.allows_duplicate_labels
False
"""
df = self.copy(deep=copy)
if allows_duplicate_labels is not None:
df.flags["allows_duplicate_labels"] = allows_duplicate_labels
return df
@final
@classmethod
def _validate_dtype(cls, dtype) -> DtypeObj | None:
"""validate the passed dtype"""
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> type[FrameOrSeries]:
"""
Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
    # ----------------------------------------------------------------------
    # Internals
    @final
    @property
    def _data(self):
        """Legacy alias for the internal manager ``self._mgr``."""
        # GH#33054 retained because some downstream packages uses this,
        # e.g. fastparquet
        return self._mgr
# ----------------------------------------------------------------------
# Axis
_stat_axis_number = 0
_stat_axis_name = "index"
_AXIS_ORDERS: list[str]
_AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {0: 0, "index": 0, "rows": 0}
_AXIS_REVERSED: bool_t
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
    @property
    def _AXIS_NUMBERS(self) -> dict[str, int]:
        """.. deprecated:: 1.1.0"""
        # stacklevel depends on ndim — presumably so the warning points at
        # the user's call site for both Series and DataFrame; see also
        # _AXIS_NAMES below.
        level = self.ndim + 1
        warnings.warn(
            "_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=level
        )
        return {"index": 0}
    @property
    def _AXIS_NAMES(self) -> dict[int, str]:
        """.. deprecated:: 1.1.0"""
        # Mirror image of the deprecated _AXIS_NUMBERS mapping above.
        level = self.ndim + 1
        warnings.warn(
            "_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=level
        )
        return {0: "index"}
@final
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
    @final
    @classmethod
    def _construct_axes_from_arguments(
        cls, args, kwargs, require_all: bool_t = False, sentinel=None
    ):
        """
        Construct and returns axes if supplied in args/kwargs.
        If require_all, raise if all axis arguments are not supplied
        return a tuple of (axes, kwargs).
        sentinel specifies the default parameter when an axis is not
        supplied; useful to distinguish when a user explicitly passes None
        in scenarios where None has special meaning.
        """
        # construct the args
        args = list(args)
        for a in cls._AXIS_ORDERS:
            # look for a argument by position
            if a not in kwargs:
                # positional args are consumed in axis order; an axis given
                # by keyword is left alone
                try:
                    kwargs[a] = args.pop(0)
                except IndexError as err:
                    if require_all:
                        raise TypeError(
                            "not enough/duplicate arguments specified!"
                        ) from err
        # pop the collected axes out of kwargs so the remaining kwargs can
        # be handed back to the caller untouched
        axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
        return axes, kwargs
@final
@classmethod
def _get_axis_number(cls, axis: Axis) -> int:
try:
return cls._AXIS_TO_AXIS_NUMBER[axis]
except KeyError:
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
@final
@classmethod
def _get_axis_name(cls, axis: Axis) -> str:
axis_number = cls._get_axis_number(axis)
return cls._AXIS_ORDERS[axis_number]
@final
def _get_axis(self, axis: Axis) -> Index:
axis_number = self._get_axis_number(axis)
assert axis_number in {0, 1}
return self.index if axis_number == 0 else self.columns
@final
@classmethod
def _get_block_manager_axis(cls, axis: Axis) -> int:
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
    @final
    def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]:
        """
        Map each level of the given axis ("index" or "columns") to its
        values, keyed by the level name, or by ``ilevel_N``/``clevel_N``
        for unnamed levels.  The axis itself is included under ``axis``.
        """
        # index or columns
        axis_index = getattr(self, axis)
        d = {}
        prefix = axis[0]
        for i, name in enumerate(axis_index.names):
            if name is not None:
                key = level = name
            else:
                # prefix with 'i' or 'c' depending on the input axis
                # e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiiindex
                key = f"{prefix}level_{i}"
                level = i
            level_values = axis_index.get_level_values(level)
            s = level_values.to_series()
            # re-key the level values by the full axis so they stay aligned
            s.index = axis_index
            d[key] = s
        # put the index/columns itself in the dict
        if isinstance(axis_index, MultiIndex):
            dindex = axis_index
        else:
            dindex = axis_index.to_series()
        d[axis] = dindex
        return d
@final
def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]:
from pandas.core.computation.parsing import clean_column_name
d: dict[str, Series | MultiIndex] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
@final
def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
    @property
    def _info_axis(self) -> Index:
        """Return the axis named by the per-class ``_info_axis_name``."""
        return getattr(self, self._info_axis_name)
    @property
    def _stat_axis(self) -> Index:
        """Return the axis named by the per-class ``_stat_axis_name`` ("index")."""
        return getattr(self, self._stat_axis_name)
@property
def shape(self) -> tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
    @property
    def axes(self) -> list[Index]:
        """
        Return index label(s) of the internal NDFrame

        Ordered according to ``_AXIS_ORDERS``.
        """
        # we do it this way because if we have reversed axes, then
        # the block manager shows then reversed
        return [self._get_axis(a) for a in self._AXIS_ORDERS]
    @property
    def ndim(self) -> int:
        """
        Return an int representing the number of axes / array dimensions.
        Return 1 if Series. Otherwise return 2 if DataFrame.
        See Also
        --------
        ndarray.ndim : Number of array dimensions.
        Examples
        --------
        >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
        >>> s.ndim
        1
        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> df.ndim
        2
        """
        # Dimensionality is delegated to the internal manager.
        return self._mgr.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
    # ``set_axis`` overloads: with ``inplace=True`` the return type is None,
    # otherwise a new object of the caller's type is returned.
    @overload
    def set_axis(
        self: FrameOrSeries, labels, axis: Axis = ..., inplace: Literal[False] = ...
    ) -> FrameOrSeries:
        ...
    @overload
    def set_axis(
        self: FrameOrSeries, labels, axis: Axis, inplace: Literal[True]
    ) -> None:
        ...
    @overload
    def set_axis(self: FrameOrSeries, labels, *, inplace: Literal[True]) -> None:
        ...
    @overload
    def set_axis(
        self: FrameOrSeries, labels, axis: Axis = ..., inplace: bool_t = ...
    ) -> FrameOrSeries | None:
        ...
    def set_axis(self, labels, axis: Axis = 0, inplace: bool_t = False):
        """
        Assign desired index to given axis.
        Indexes for%(extended_summary_sub)s row labels can be changed by assigning
        a list-like or Index.
        Parameters
        ----------
        labels : list-like, Index
            The values for the new index.
        axis : %(axes_single_arg)s, default 0
            The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
        inplace : bool, default False
            Whether to return a new %(klass)s instance.
        Returns
        -------
        renamed : %(klass)s or None
            An object of type %(klass)s or None if ``inplace=True``.
        See Also
        --------
        %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
        """
        # Validate the inplace flag / duplicate-label policy first; the
        # actual assignment is shared with ``rename`` via _set_axis_nocheck.
        self._check_inplace_and_allows_duplicate_labels(inplace)
        return self._set_axis_nocheck(labels, axis, inplace)
@final
def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t):
# NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy.
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
    def _set_axis(self, axis: int, labels: Index) -> None:
        """Set ``labels`` as the axis at position ``axis`` on the block manager."""
        labels = ensure_index(labels)
        self._mgr.set_axis(axis, labels)
        # Cached items may reference the old axis; invalidate them.
        self._clear_item_cache()
    @final
    def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
        """
        Interchange axes and swap values axes appropriately.
        Returns
        -------
        y : same as input
        """
        i = self._get_axis_number(axis1)
        j = self._get_axis_number(axis2)
        if i == j:
            # swapping an axis with itself is a no-op; still honor ``copy``
            if copy:
                return self.copy()
            return self
        mapping = {i: j, j: i}
        # axes in swapped order; the underlying values are transposed to match
        new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
        new_values = self.values.swapaxes(i, j)
        if copy:
            new_values = new_values.copy()
        # ignore needed because of NDFrame constructor is different than
        # DataFrame/Series constructors.
        return self._constructor(
            # error: Argument 1 to "NDFrame" has incompatible type "ndarray"; expected
            # "Union[ArrayManager, BlockManager]"
            # error: Argument 2 to "NDFrame" has incompatible type "*Generator[Index,
            # None, None]"; expected "bool" [arg-type]
            # error: Argument 2 to "NDFrame" has incompatible type "*Generator[Index,
            # None, None]"; expected "Optional[Mapping[Hashable, Any]]"
            new_values, # type: ignore[arg-type]
            *new_axes, # type: ignore[arg-type]
        ).__finalize__(self, method="swapaxes")
    @final
    @doc(klass=_shared_doc_kwargs["klass"])
    def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
        """
        Return {klass} with requested index / column level(s) removed.
        Parameters
        ----------
        level : int, str, or list-like
            If a string is given, must be the name of a level
            If list-like, elements must be names or positional indexes
            of levels.
        axis : {{0 or 'index', 1 or 'columns'}}, default 0
            Axis along which the level(s) is removed:
            * 0 or 'index': remove level(s) in column.
            * 1 or 'columns': remove level(s) in row.
        Returns
        -------
        {klass}
            {klass} with requested index / column level(s) removed.
        Examples
        --------
        >>> df = pd.DataFrame([
        ...     [1, 2, 3, 4],
        ...     [5, 6, 7, 8],
        ...     [9, 10, 11, 12]
        ... ]).set_index([0, 1]).rename_axis(['a', 'b'])
        >>> df.columns = pd.MultiIndex.from_tuples([
        ...     ('c', 'e'), ('d', 'f')
        ... ], names=['level_1', 'level_2'])
        >>> df
        level_1   c   d
        level_2   e   f
        a b
        1 2      3   4
        5 6      7   8
        9 10    11  12
        >>> df.droplevel('a')
        level_1   c   d
        level_2   e   f
        b
        2        3   4
        6        7   8
        10      11  12
        >>> df.droplevel('level_2', axis=1)
        level_1   c   d
        a b
        1 2      3   4
        5 6      7   8
        9 10    11  12
        """
        labels = self._get_axis(axis)
        # Index.droplevel performs the actual level removal; the new labels
        # are then attached via a non-inplace set_axis.
        new_labels = labels.droplevel(level)
        return self.set_axis(new_labels, axis=axis, inplace=False)
def pop(self, item: Hashable) -> Series | Any:
result = self[item]
del self[item]
return result
    @final
    def squeeze(self, axis=None):
        """
        Squeeze 1 dimensional axis objects into scalars.
        Series or DataFrames with a single element are squeezed to a scalar.
        DataFrames with a single column or a single row are squeezed to a
        Series. Otherwise the object is unchanged.
        This method is most useful when you don't know if your
        object is a Series or DataFrame, but you do know it has just a single
        column. In that case you can safely call `squeeze` to ensure you have a
        Series.
        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns', None}, default None
            A specific axis to squeeze. By default, all length-1 axes are
            squeezed.
        Returns
        -------
        DataFrame, Series, or scalar
            The projection after squeezing `axis` or all the axes.
        See Also
        --------
        Series.iloc : Integer-location based indexing for selecting scalars.
        DataFrame.iloc : Integer-location based indexing for selecting Series.
        Series.to_frame : Inverse of DataFrame.squeeze for a
            single-column DataFrame.
        Examples
        --------
        >>> primes = pd.Series([2, 3, 5, 7])
        Slicing might produce a Series with a single value:
        >>> even_primes = primes[primes % 2 == 0]
        >>> even_primes
        0    2
        dtype: int64
        >>> even_primes.squeeze()
        2
        Squeezing objects with more than one value in every axis does nothing:
        >>> odd_primes = primes[primes % 2 == 1]
        >>> odd_primes
        1    3
        2    5
        3    7
        dtype: int64
        >>> odd_primes.squeeze()
        1    3
        2    5
        3    7
        dtype: int64
        Squeezing is even more effective when used with DataFrames.
        >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
        >>> df
           a  b
        0  1  2
        1  3  4
        Slicing a single column will produce a DataFrame with the columns
        having only one value:
        >>> df_a = df[['a']]
        >>> df_a
           a
        0  1
        1  3
        So the columns can be squeezed down, resulting in a Series:
        >>> df_a.squeeze('columns')
        0    1
        1    3
        Name: a, dtype: int64
        Slicing a single row from a single column will produce a single
        scalar DataFrame:
        >>> df_0a = df.loc[df.index < 1, ['a']]
        >>> df_0a
           a
        0  1
        Squeezing the rows produces a single scalar Series:
        >>> df_0a.squeeze('rows')
        a    1
        Name: 0, dtype: int64
        Squeezing all axes will project directly into a scalar:
        >>> df_0a.squeeze()
        1
        """
        # Normalize to a collection of axis numbers; None means "all axes".
        axis = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),)
        # Requested axes of length 1 are indexed with 0 (dropping that
        # dimension); all other axes are kept whole via slice(None).
        return self.iloc[
            tuple(
                0 if i in axis and len(a) == 1 else slice(None)
                for i, a in enumerate(self.axes)
            )
        ]
# ----------------------------------------------------------------------
# Rename
    def rename(
        self: FrameOrSeries,
        mapper: Renamer | None = None,
        *,
        index: Renamer | None = None,
        columns: Renamer | None = None,
        axis: Axis | None = None,
        copy: bool_t = True,
        inplace: bool_t = False,
        level: Level | None = None,
        errors: str = "ignore",
    ) -> FrameOrSeries | None:
        """
        Alter axes input function or functions. Function / dict values must be
        unique (1-to-1). Labels not contained in a dict / Series will be left
        as-is. Extra labels listed don't throw an error. Alternatively, change
        ``Series.name`` with a scalar value (Series only).
        Parameters
        ----------
        %(axes)s : scalar, list-like, dict-like or function, optional
            Scalar or list-like will alter the ``Series.name`` attribute,
            and raise on DataFrame.
            dict-like or functions are transformations to apply to
            that axis' values
        copy : bool, default True
            Also copy underlying data.
        inplace : bool, default False
            Whether to return a new {klass}. If True then value of copy is
            ignored.
        level : int or level name, default None
            In case of a MultiIndex, only rename labels in the specified
            level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
            or `columns` contains labels that are not present in the Index
            being transformed.
            If 'ignore', existing keys will be renamed and extra keys will be
            ignored.
        Returns
        -------
        renamed : {klass} (new object)
        Raises
        ------
        KeyError
            If any of the labels is not found in the selected axis and
            "errors='raise'".
        See Also
        --------
        NDFrame.rename_axis
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        >>> s.rename(lambda x: x ** 2)  # function, changes labels
        0    1
        1    2
        4    3
        dtype: int64
        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
        0    1
        3    2
        5    3
        dtype: int64
        Since ``DataFrame`` doesn't have a ``.name`` attribute,
        only mapping-type arguments are allowed.
        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        >>> df.rename(2)
        Traceback (most recent call last):
        ...
        TypeError: 'int' object is not callable
        ``DataFrame.rename`` supports two calling conventions
        * ``(index=index_mapper, columns=columns_mapper, ...)``
        * ``(mapper, axis={'index', 'columns'}, ...)``
        We *highly* recommend using keyword arguments to clarify your
        intent.
        >>> df.rename(index=str, columns={"A": "a", "B": "c"})
           a  c
        0  1  4
        1  2  5
        2  3  6
        >>> df.rename(index=str, columns={"A": "a", "C": "c"})
           a  B
        0  1  4
        1  2  5
        2  3  6
        Using axis-style parameters
        >>> df.rename(str.lower, axis='columns')
           a  b
        0  1  4
        1  2  5
        2  3  6
        >>> df.rename({1: 2, 2: 4}, axis='index')
           A  B
        0  1  4
        2  2  5
        4  3  6
        See the :ref:`user guide <basics.rename>` for more.
        """
        # Exactly one calling convention is allowed: either mapper (+axis)
        # or the explicit index=/columns= keywords — never both.
        if mapper is None and index is None and columns is None:
            raise TypeError("must pass an index to rename")
        if index is not None or columns is not None:
            if axis is not None:
                raise TypeError(
                    "Cannot specify both 'axis' and any of 'index' or 'columns'"
                )
            elif mapper is not None:
                raise TypeError(
                    "Cannot specify both 'mapper' and any of 'index' or 'columns'"
                )
        else:
            # use the mapper argument
            if axis and self._get_axis_number(axis) == 1:
                columns = mapper
            else:
                index = mapper
        self._check_inplace_and_allows_duplicate_labels(inplace)
        result = self if inplace else self.copy(deep=copy)
        # Apply the index (axis 0) and columns (axis 1) replacements in turn.
        for axis_no, replacements in enumerate((index, columns)):
            if replacements is None:
                continue
            ax = self._get_axis(axis_no)
            f = com.get_rename_function(replacements)
            if level is not None:
                level = ax._get_level_number(level)
            # GH 13473
            if not callable(replacements):
                indexer = ax.get_indexer_for(replacements)
                if errors == "raise" and len(indexer[indexer == -1]):
                    missing_labels = [
                        label
                        for index, label in enumerate(replacements)
                        if indexer[index] == -1
                    ]
                    raise KeyError(f"{missing_labels} not found in axis")
            new_index = ax._transform_index(f, level=level)
            result._set_axis_nocheck(new_index, axis=axis_no, inplace=True)
            result._clear_item_cache()
        if inplace:
            self._update_inplace(result)
            return None
        else:
            return result.__finalize__(self, method="rename")
    @rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
    def rename_axis(self, mapper=lib.no_default, **kwargs):
        """
        Set the name of the axis for the index or columns.
        Parameters
        ----------
        mapper : scalar, list-like, optional
            Value to set the axis name attribute.
        index, columns : scalar, list-like, dict-like or function, optional
            A scalar, list-like, dict-like or functions transformations to
            apply to that axis' values.
            Note that the ``columns`` parameter is not allowed if the
            object is a Series. This parameter only apply for DataFrame
            type objects.
            Use either ``mapper`` and ``axis`` to
            specify the axis to target with ``mapper``, or ``index``
            and/or ``columns``.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to rename.
        copy : bool, default True
            Also copy underlying data.
        inplace : bool, default False
            Modifies the object directly, instead of creating a new Series
            or DataFrame.
        Returns
        -------
        Series, DataFrame, or None
            The same type as the caller or None if ``inplace=True``.
        See Also
        --------
        Series.rename : Alter Series index labels or name.
        DataFrame.rename : Alter DataFrame index labels or name.
        Index.rename : Set new names on index.
        Notes
        -----
        ``DataFrame.rename_axis`` supports two calling conventions
        * ``(index=index_mapper, columns=columns_mapper, ...)``
        * ``(mapper, axis={'index', 'columns'}, ...)``
        The first calling convention will only modify the names of
        the index and/or the names of the Index object that is the columns.
        In this case, the parameter ``copy`` is ignored.
        The second calling convention will modify the names of the
        corresponding index if mapper is a list or a scalar.
        However, if mapper is dict-like or a function, it will use the
        deprecated behavior of modifying the axis *labels*.
        We *highly* recommend using keyword arguments to clarify your
        intent.
        Examples
        --------
        **Series**
        >>> s = pd.Series(["dog", "cat", "monkey"])
        >>> s
        0       dog
        1       cat
        2    monkey
        dtype: object
        >>> s.rename_axis("animal")
        animal
        0    dog
        1    cat
        2    monkey
        dtype: object
        **DataFrame**
        >>> df = pd.DataFrame({"num_legs": [4, 4, 2],
        ...                    "num_arms": [0, 0, 2]},
        ...                   ["dog", "cat", "monkey"])
        >>> df
                num_legs  num_arms
        dog            4         0
        cat            4         0
        monkey         2         2
        >>> df = df.rename_axis("animal")
        >>> df
                num_legs  num_arms
        animal
        dog            4         0
        cat            4         0
        monkey         2         2
        >>> df = df.rename_axis("limbs", axis="columns")
        >>> df
        limbs   num_legs  num_arms
        animal
        dog            4         0
        cat            4         0
        monkey         2         2
        **MultiIndex**
        >>> df.index = pd.MultiIndex.from_product([['mammal'],
        ...                                        ['dog', 'cat', 'monkey']],
        ...                                       names=['type', 'name'])
        >>> df
        limbs          num_legs  num_arms
        type   name
        mammal dog            4         0
               cat            4         0
               monkey         2         2
        >>> df.rename_axis(index={'type': 'class'})
        limbs          num_legs  num_arms
        class  name
        mammal dog            4         0
               cat            4         0
               monkey         2         2
        >>> df.rename_axis(columns=str.upper)
        LIMBS          num_legs  num_arms
        type   name
        mammal dog            4         0
               cat            4         0
               monkey         2         2
        """
        # Split axis keywords (index=/columns=) from the remaining kwargs;
        # no_default marks axes the caller did not mention.
        axes, kwargs = self._construct_axes_from_arguments(
            (), kwargs, sentinel=lib.no_default
        )
        copy = kwargs.pop("copy", True)
        inplace = kwargs.pop("inplace", False)
        axis = kwargs.pop("axis", 0)
        if axis is not None:
            axis = self._get_axis_number(axis)
        if kwargs:
            raise TypeError(
                "rename_axis() got an unexpected keyword "
                f'argument "{list(kwargs.keys())[0]}"'
            )
        inplace = validate_bool_kwarg(inplace, "inplace")
        if mapper is not lib.no_default:
            # Use v0.23 behavior if a scalar or list
            # (a scalar/list mapper sets the axis *name*; dict-like or
            # callable mappers are rejected here — use .rename for labels)
            non_mapper = is_scalar(mapper) or (
                is_list_like(mapper) and not is_dict_like(mapper)
            )
            if non_mapper:
                return self._set_axis_name(mapper, axis=axis, inplace=inplace)
            else:
                raise ValueError("Use `.rename` to alter labels with a mapper.")
        else:
            # Use new behavior. Means that index and/or columns
            # is specified
            result = self if inplace else self.copy(deep=copy)
            for axis in range(self._AXIS_LEN):
                v = axes.get(self._get_axis_name(axis))
                if v is lib.no_default:
                    continue
                non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
                if non_mapper:
                    newnames = v
                else:
                    # dict-like/callable: transform each existing level name
                    f = com.get_rename_function(v)
                    curnames = self._get_axis(axis).names
                    newnames = [f(name) for name in curnames]
                result._set_axis_name(newnames, axis=axis, inplace=True)
            if not inplace:
                return result
    @final
    def _set_axis_name(self, name, axis=0, inplace=False):
        """
        Set the name(s) of the axis.
        Parameters
        ----------
        name : str or list of str
            Name(s) to set.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to set the label. The value 0 or 'index' specifies index,
            and the value 1 or 'columns' specifies columns.
        inplace : bool, default False
            If `True`, do operation inplace and return None.
        Returns
        -------
        Series, DataFrame, or None
            The same type as the caller or `None` if `inplace` is `True`.
        See Also
        --------
        DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
        Series.rename : Alter the index labels or set the index name
            of :class:`Series`.
        Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
        Examples
        --------
        >>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
        ...                   ["dog", "cat", "monkey"])
        >>> df
                num_legs
        dog            4
        cat            4
        monkey         2
        >>> df._set_axis_name("animal")
                num_legs
        animal
        dog            4
        cat            4
        monkey         2
        >>> df.index = pd.MultiIndex.from_product(
        ...                [["mammal"], ['dog', 'cat', 'monkey']])
        >>> df._set_axis_name(["type", "name"])
               num_legs
        type   name
        mammal dog        4
               cat        4
               monkey     2
        """
        axis = self._get_axis_number(axis)
        # Index.set_names returns a renamed copy of the axis ...
        idx = self._get_axis(axis).set_names(name)
        inplace = validate_bool_kwarg(inplace, "inplace")
        # ... which is attached to self or to a copy depending on ``inplace``.
        renamed = self if inplace else self.copy()
        renamed.set_axis(idx, axis=axis, inplace=True)
        if not inplace:
            return renamed
# ----------------------------------------------------------------------
# Comparison Methods
@final
def _indexed_same(self, other) -> bool_t:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
    @final
    def equals(self, other: object) -> bool_t:
        """
        Test whether two objects contain the same elements.
        This function allows two Series or DataFrames to be compared against
        each other to see if they have the same shape and elements. NaNs in
        the same location are considered equal.
        The row/column index do not need to have the same type, as long
        as the values are considered equal. Corresponding columns must be of
        the same dtype.
        Parameters
        ----------
        other : Series or DataFrame
            The other Series or DataFrame to be compared with the first.
        Returns
        -------
        bool
            True if all elements are the same in both objects, False
            otherwise.
        See Also
        --------
        Series.eq : Compare two Series objects of the same length
            and return a Series where each element is True if the element
            in each Series is equal, False otherwise.
        DataFrame.eq : Compare two DataFrame objects of the same shape and
            return a DataFrame where each element is True if the respective
            element in each DataFrame is equal, False otherwise.
        testing.assert_series_equal : Raises an AssertionError if left and
            right are not equal. Provides an easy interface to ignore
            inequality in dtypes, indexes and precision among others.
        testing.assert_frame_equal : Like assert_series_equal, but targets
            DataFrames.
        numpy.array_equal : Return True if two arrays have the same shape
            and elements, False otherwise.
        Examples
        --------
        >>> df = pd.DataFrame({1: [10], 2: [20]})
        >>> df
            1   2
        0  10  20
        DataFrames df and exactly_equal have the same types and values for
        their elements and column labels, which will return True.
        >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
        >>> exactly_equal
            1   2
        0  10  20
        >>> df.equals(exactly_equal)
        True
        DataFrames df and different_column_type have the same element
        types and values, but have different types for the column labels,
        which will still return True.
        >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
        >>> different_column_type
           1.0  2.0
        0   10   20
        >>> df.equals(different_column_type)
        True
        DataFrames df and different_data_type have different types for the
        same values for their elements, and will return False even though
        their column labels are the same values and types.
        >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
        >>> different_data_type
              1     2
        0  10.0  20.0
        >>> df.equals(different_data_type)
        False
        """
        # Symmetric isinstance check: either operand may be a subclass of
        # the other's type; anything else is never equal.
        if not (isinstance(other, type(self)) or isinstance(self, type(other))):
            return False
        other = cast(NDFrame, other)
        # Delegate to the block managers, which compare axes and values.
        return self._mgr.equals(other._mgr)
# -------------------------------------------------------------------------
# Unary Methods
@final
def __neg__(self):
values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
@final
def __pos__(self):
values = self._values
if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(
"Unary plus expects bool, numeric, timedelta, "
f"or object dtype, not {values.dtype}"
)
return self.__array_wrap__(arr)
@final
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._mgr.apply(operator.invert)
return self._constructor(new_data).__finalize__(self, method="__invert__")
    @final
    def __nonzero__(self):
        # Truthiness is intentionally undefined for pandas objects: always
        # raise rather than guess (callers should use .empty/.bool()/.any()/.all()).
        raise ValueError(
            f"The truth value of a {type(self).__name__} is ambiguous. "
            "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
        )
    # Python 3 looks up __bool__; keep both names bound to the same method.
    __bool__ = __nonzero__
    @final
    def bool(self):
        """
        Return the bool of a single element Series or DataFrame.
        This must be a boolean scalar value, either True or False. It will raise a
        ValueError if the Series or DataFrame does not have exactly 1 element, or that
        element is not boolean (integer values 0 and 1 will also raise an exception).
        Returns
        -------
        bool
            The value in the Series or DataFrame.
        See Also
        --------
        Series.astype : Change the data type of a Series, including to boolean.
        DataFrame.astype : Change the data type of a DataFrame, including to boolean.
        numpy.bool_ : NumPy boolean data type, used by pandas for boolean values.
        Examples
        --------
        The method will only work for single element objects with a boolean value:
        >>> pd.Series([True]).bool()
        True
        >>> pd.Series([False]).bool()
        False
        >>> pd.DataFrame({'col': [True]}).bool()
        True
        >>> pd.DataFrame({'col': [False]}).bool()
        False
        """
        # squeeze() collapses a single-element object down to a scalar.
        v = self.squeeze()
        if isinstance(v, (bool, np.bool_)):
            return bool(v)
        elif is_scalar(v):
            # single element, but not boolean (e.g. int 0/1) -> reject
            raise ValueError(
                "bool cannot act on a non-boolean single element "
                f"{type(self).__name__}"
            )
        # more than one element: reuse the standard "truth value is
        # ambiguous" error (this call always raises)
        self.__nonzero__()
    @final
    def __abs__(self: FrameOrSeries) -> FrameOrSeries:
        # Delegate to the public ``abs`` method so ``abs(obj)`` matches it.
        return self.abs()
    @final
    def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
        # Delegate to the public ``round`` method so ``round(obj)`` matches it.
        return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
@final
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
@final
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key : str
Potential label name
axis : int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
@final
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key : str
Potential label or level name
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
    @final
    def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
        """
        Check whether `key` is ambiguous.
        By ambiguous, we mean that it matches both a level of the input
        `axis` and a label of the other axis.
        Parameters
        ----------
        key : str or object
            Label or level name.
        axis : int, default 0
            Axis that levels are associated with (0 for index, 1 for columns).
        Raises
        ------
        ValueError: `key` is ambiguous
        """
        axis = self._get_axis_number(axis)
        other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
        # Ambiguous = a level name on `axis` AND a label on some other axis.
        if (
            key is not None
            and is_hashable(key)
            and key in self.axes[axis].names
            and any(key in self.axes[ax] for ax in other_axes)
        ):
            # Build an informative and grammatical warning
            level_article, level_type = (
                ("an", "index") if axis == 0 else ("a", "column")
            )
            label_article, label_type = (
                ("a", "column") if axis == 0 else ("an", "index")
            )
            msg = (
                f"'{key}' is both {level_article} {level_type} level and "
                f"{label_article} {label_type} label, which is ambiguous."
            )
            raise ValueError(msg)
    @final
    def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
        """
        Return a 1-D array of values associated with `key`, a label or level
        from the given `axis`.
        Retrieval logic:
          - (axis=0): Return column values if `key` matches a column label.
            Otherwise return index level values if `key` matches an index
            level.
          - (axis=1): Return row values if `key` matches an index label.
            Otherwise return column level values if 'key' matches a column
            level
        Parameters
        ----------
        key : str
            Label or level name.
        axis : int, default 0
            Axis that levels are associated with (0 for index, 1 for columns)
        Returns
        -------
        values : np.ndarray
        Raises
        ------
        KeyError
            if `key` matches neither a label nor a level
        ValueError
            if `key` matches multiple labels
        FutureWarning
            if `key` is ambiguous. This will become an ambiguity error in a
            future version
        """
        axis = self._get_axis_number(axis)
        other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
        # Labels take precedence over levels (and ambiguity raises).
        if self._is_label_reference(key, axis=axis):
            self._check_label_or_level_ambiguity(key, axis=axis)
            values = self.xs(key, axis=other_axes[0])._values
        elif self._is_level_reference(key, axis=axis):
            values = self.axes[axis].get_level_values(key)._values
        else:
            raise KeyError(key)
        # Check for duplicates
        # A 2-D result means the label matched more than one column/row.
        if values.ndim > 1:
            if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
                multi_message = (
                    "\n"
                    "For a multi-index, the label must be a "
                    "tuple with elements corresponding to each level."
                )
            else:
                multi_message = ""
            label_axis_name = "column" if axis == 0 else "index"
            raise ValueError(
                f"The {label_axis_name} label '{key}' is not unique.{multi_message}"
            )
        return values
    @final
    def _drop_labels_or_levels(self, keys, axis: int = 0):
        """
        Drop labels and/or levels for the given `axis`.

        For each key in `keys`:

        - (axis=0): If key matches a column label then drop the column.
          Otherwise if key matches an index level then drop the level.
        - (axis=1): If key matches an index label then drop the row.
          Otherwise if key matches a column level then drop the level.

        Parameters
        ----------
        keys : str or list of str
            Labels or levels to drop.
        axis : int, default 0
            Axis that levels are associated with (0 for index, 1 for columns).

        Returns
        -------
        dropped : DataFrame

        Raises
        ------
        ValueError
            If any `keys` match neither a label nor a level.
        """
        axis = self._get_axis_number(axis)
        # Validate keys: every key must resolve to either a label or a level
        # on the requested axis, otherwise we raise before mutating anything.
        keys = com.maybe_make_list(keys)
        invalid_keys = [
            k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
        ]
        if invalid_keys:
            raise ValueError(
                "The following keys are not valid labels or "
                f"levels for axis {axis}: {invalid_keys}"
            )
        # Compute levels and labels to drop.  Level references win over label
        # references here, mirroring the retrieval logic in
        # _get_label_or_level_values.
        levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
        labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
        # Perform copy upfront and then use inplace operations below.
        # This ensures that we always perform exactly one copy.
        # ``copy`` and/or ``inplace`` options could be added in the future.
        dropped = self.copy()
        if axis == 0:
            # Handle dropping index levels
            if levels_to_drop:
                dropped.reset_index(levels_to_drop, drop=True, inplace=True)
            # Handle dropping columns labels
            if labels_to_drop:
                dropped.drop(labels_to_drop, axis=1, inplace=True)
        else:
            # Handle dropping column levels
            if levels_to_drop:
                if isinstance(dropped.columns, MultiIndex):
                    # Drop the specified levels from the MultiIndex
                    dropped.columns = dropped.columns.droplevel(levels_to_drop)
                else:
                    # Drop the last level of Index by replacing with
                    # a RangeIndex
                    dropped.columns = RangeIndex(dropped.columns.size)
            # Handle dropping index labels
            if labels_to_drop:
                dropped.drop(labels_to_drop, axis=0, inplace=True)
        return dropped
# ----------------------------------------------------------------------
# Iteration
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
__hash__: None # type: ignore[assignment]
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
def items(self):
"""
Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
    @doc(items)
    def iteritems(self):
        # Backwards-compatible alias for :meth:`items`; the ``@doc``
        # decorator copies that method's docstring onto this one.
        return self.items()
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
@final
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
    @property
    def empty(self) -> bool_t:
        """
        Indicator whether DataFrame is empty.

        True if DataFrame is entirely empty (no items), meaning any of the
        axes are of length 0.

        Returns
        -------
        bool
            If DataFrame is empty, return True, if not return False.

        See Also
        --------
        Series.dropna : Return series without null values.
        DataFrame.dropna : Return DataFrame with labels on given axis omitted
            where (all or any) data are missing.

        Notes
        -----
        If DataFrame contains only NaNs, it is still not considered empty. See
        the example below.

        Examples
        --------
        An example of an actual empty DataFrame. Notice the index is empty:

        >>> df_empty = pd.DataFrame({'A' : []})
        >>> df_empty
        Empty DataFrame
        Columns: [A]
        Index: []
        >>> df_empty.empty
        True

        If we only have NaNs in our DataFrame, it is not considered empty! We
        will need to drop the NaNs to make the DataFrame empty:

        >>> df = pd.DataFrame({'A' : [np.nan]})
        >>> df
            A
        0 NaN
        >>> df.empty
        False
        >>> df.dropna().empty
        True
        """
        # Empty means ANY axis has zero length (no rows OR no columns).
        return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
    def __array_wrap__(
        self,
        result: np.ndarray,
        context: tuple[Callable, tuple[Any, ...], int] | None = None,
    ):
        """
        Gets called after a ufunc and other functions.

        Parameters
        ----------
        result: np.ndarray
            The result of the ufunc or other function called on the NumPy array
            returned by __array__.
        context: tuple of (func, tuple, int)
            This parameter is returned by ufuncs as a 3-element tuple: (name of the
            ufunc, arguments of the ufunc, domain of the ufunc), but is not set by
            other numpy functions.

        Notes
        -----
        Series implements __array_ufunc__ so this is not called for ufunc on
        Series.
        """
        res = lib.item_from_zerodim(result)
        if is_scalar(res):
            # e.g. we get here with np.ptp(series)
            # ptp also requires the item_from_zerodim
            return res
        # Re-wrap the ndarray result with this object's axes/labels.
        d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
        # error: Argument 1 to "NDFrame" has incompatible type "ndarray";
        # expected "BlockManager"
        return self._constructor(res, **d).__finalize__(  # type: ignore[arg-type]
            self, method="__array_wrap__"
        )
    def __array_ufunc__(
        self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any
    ):
        # Delegate all ufunc handling to the shared implementation in
        # pandas.core.arraylike.
        return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs)
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
@final
def __getstate__(self) -> dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return {
"_mgr": self._mgr,
"_typ": self._typ,
"_metadata": self._metadata,
"attrs": self.attrs,
"_flags": {k: self.flags[k] for k in self.flags._keys},
**meta,
}
    @final
    def __setstate__(self, state):
        # Restore from pickle.  ``state`` may arrive in one of three
        # historical formats: a raw BlockManager, a dict (current format),
        # or a legacy pre-0.12 2-tuple, which is no longer supported.
        if isinstance(state, BlockManager):
            self._mgr = state
        elif isinstance(state, dict):
            if "_data" in state and "_mgr" not in state:
                # compat for older pickles
                state["_mgr"] = state.pop("_data")
            typ = state.get("_typ")
            if typ is not None:
                attrs = state.get("_attrs", {})
                # object.__setattr__ bypasses NDFrame's custom __setattr__
                # while the instance is only partially initialized.
                object.__setattr__(self, "_attrs", attrs)
                flags = state.get("_flags", {"allows_duplicate_labels": True})
                object.__setattr__(self, "_flags", Flags(self, **flags))
                # set in the order of internal names
                # to avoid definitional recursion
                # e.g. say fill_value needing _mgr to be
                # defined
                meta = set(self._internal_names + self._metadata)
                for k in list(meta):
                    if k in state and k != "_flags":
                        v = state[k]
                        object.__setattr__(self, k, v)
                for k, v in state.items():
                    if k not in meta:
                        object.__setattr__(self, k, v)
            else:
                raise NotImplementedError("Pre-0.12 pickles are no longer supported")
        elif len(state) == 2:
            raise NotImplementedError("Pre-0.12 pickles are no longer supported")
        # Drop any cached items; they refer to the pre-unpickle state.
        self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
@final
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
@final
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
as_json = data.to_json(orient="table")
as_json = cast(str, as_json)
return json.loads(as_json, object_pairs_hook=collections.OrderedDict)
# ----------------------------------------------------------------------
# I/O Methods
    @final
    @doc(klass="object", storage_options=_shared_docs["storage_options"])
    def to_excel(
        self,
        excel_writer,
        sheet_name: str = "Sheet1",
        na_rep: str = "",
        float_format: str | None = None,
        columns=None,
        header=True,
        index=True,
        index_label=None,
        startrow=0,
        startcol=0,
        engine=None,
        merge_cells=True,
        encoding=None,
        inf_rep="inf",
        verbose=True,
        freeze_panes=None,
        storage_options: StorageOptions = None,
    ) -> None:
        """
        Write {klass} to an Excel sheet.

        To write a single {klass} to an Excel .xlsx file it is only necessary to
        specify a target file name. To write to multiple sheets it is necessary to
        create an `ExcelWriter` object with a target file name, and specify a sheet
        in the file to write to.

        Multiple sheets may be written to by specifying unique `sheet_name`.
        With all data written to the file it is necessary to save the changes.
        Note that creating an `ExcelWriter` object with a file name that already
        exists will result in the contents of the existing file being erased.

        Parameters
        ----------
        excel_writer : path-like, file-like, or ExcelWriter object
            File path or existing ExcelWriter.
        sheet_name : str, default 'Sheet1'
            Name of sheet which will contain DataFrame.
        na_rep : str, default ''
            Missing data representation.
        float_format : str, optional
            Format string for floating point numbers. For example
            ``float_format="%.2f"`` will format 0.1234 to 0.12.
        columns : sequence or list of str, optional
            Columns to write.
        header : bool or list of str, default True
            Write out the column names. If a list of string is given it is
            assumed to be aliases for the column names.
        index : bool, default True
            Write row names (index).
        index_label : str or sequence, optional
            Column label for index column(s) if desired. If not specified, and
            `header` and `index` are True, then the index names are used. A
            sequence should be given if the DataFrame uses MultiIndex.
        startrow : int, default 0
            Upper left cell row to dump data frame.
        startcol : int, default 0
            Upper left cell column to dump data frame.
        engine : str, optional
            Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
            via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
            ``io.excel.xlsm.writer``.

            .. deprecated:: 1.2.0

                As the `xlwt <https://pypi.org/project/xlwt/>`__ package is no longer
                maintained, the ``xlwt`` engine will be removed in a future version
                of pandas.

        merge_cells : bool, default True
            Write MultiIndex and Hierarchical Rows as merged cells.
        encoding : str, optional
            Encoding of the resulting excel file. Only necessary for xlwt,
            other writers support unicode natively.
        inf_rep : str, default 'inf'
            Representation for infinity (there is no native representation for
            infinity in Excel).
        verbose : bool, default True
            Display more information in the error logs.
        freeze_panes : tuple of int (length 2), optional
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen.
        {storage_options}

            .. versionadded:: 1.2.0

        See Also
        --------
        to_csv : Write DataFrame to a comma-separated values (csv) file.
        ExcelWriter : Class for writing DataFrame objects into excel sheets.
        read_excel : Read an Excel file into a pandas DataFrame.
        read_csv : Read a comma-separated values (csv) file into DataFrame.

        Notes
        -----
        For compatibility with :meth:`~DataFrame.to_csv`,
        to_excel serializes lists and dicts to strings before writing.

        Once a workbook has been saved it is not possible to write further
        data without rewriting the whole workbook.

        Examples
        --------

        Create, write to and save a workbook:

        >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
        ...                    index=['row 1', 'row 2'],
        ...                    columns=['col 1', 'col 2'])
        >>> df1.to_excel("output.xlsx")  # doctest: +SKIP

        To specify the sheet name:

        >>> df1.to_excel("output.xlsx",
        ...              sheet_name='Sheet_name_1')  # doctest: +SKIP

        If you wish to write to more than one sheet in the workbook, it is
        necessary to specify an ExcelWriter object:

        >>> df2 = df1.copy()
        >>> with pd.ExcelWriter('output.xlsx') as writer:  # doctest: +SKIP
        ...     df1.to_excel(writer, sheet_name='Sheet_name_1')
        ...     df2.to_excel(writer, sheet_name='Sheet_name_2')

        ExcelWriter can also be used to append to an existing Excel file:

        >>> with pd.ExcelWriter('output.xlsx',
        ...                     mode='a') as writer:  # doctest: +SKIP
        ...     df.to_excel(writer, sheet_name='Sheet_name_3')

        To set the library that is used to write the Excel file,
        you can pass the `engine` keyword (the default engine is
        automatically chosen depending on the file extension):

        >>> df1.to_excel('output1.xlsx', engine='xlsxwriter')  # doctest: +SKIP
        """
        # A Series is promoted to a one-column frame so a single formatter
        # path handles both Series and DataFrame.
        df = self if isinstance(self, ABCDataFrame) else self.to_frame()
        from pandas.io.formats.excel import ExcelFormatter
        formatter = ExcelFormatter(
            df,
            na_rep=na_rep,
            cols=columns,
            header=header,
            float_format=float_format,
            index=index,
            index_label=index_label,
            merge_cells=merge_cells,
            inf_rep=inf_rep,
        )
        formatter.write(
            excel_writer,
            sheet_name=sheet_name,
            startrow=startrow,
            startcol=startcol,
            freeze_panes=freeze_panes,
            engine=engine,
            storage_options=storage_options,
        )
    @final
    @doc(storage_options=_shared_docs["storage_options"])
    def to_json(
        self,
        path_or_buf: FilePathOrBuffer | None = None,
        orient: str | None = None,
        date_format: str | None = None,
        double_precision: int = 10,
        force_ascii: bool_t = True,
        date_unit: str = "ms",
        default_handler: Callable[[Any], JSONSerializable] | None = None,
        lines: bool_t = False,
        compression: CompressionOptions = "infer",
        index: bool_t = True,
        indent: int | None = None,
        storage_options: StorageOptions = None,
    ) -> str | None:
        """
        Convert the object to a JSON string.

        Note NaN's and None will be converted to null and datetime objects
        will be converted to UNIX timestamps.

        Parameters
        ----------
        path_or_buf : str or file handle, optional
            File path or object. If not specified, the result is returned as
            a string.
        orient : str
            Indication of expected JSON string format.

            * Series:

                - default is 'index'
                - allowed values are: {{'split', 'records', 'index', 'table'}}.

            * DataFrame:

                - default is 'columns'
                - allowed values are: {{'split', 'records', 'index', 'columns',
                  'values', 'table'}}.

            * The format of the JSON string:

                - 'split' : dict like {{'index' -> [index], 'columns' -> [columns],
                  'data' -> [values]}}
                - 'records' : list like [{{column -> value}}, ... , {{column -> value}}]
                - 'index' : dict like {{index -> {{column -> value}}}}
                - 'columns' : dict like {{column -> {{index -> value}}}}
                - 'values' : just the values array
                - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}}

                Describing the data, where data component is like ``orient='records'``.

        date_format : {{None, 'epoch', 'iso'}}
            Type of date conversion. 'epoch' = epoch milliseconds,
            'iso' = ISO8601. The default depends on the `orient`. For
            ``orient='table'``, the default is 'iso'. For all other orients,
            the default is 'epoch'.
        double_precision : int, default 10
            The number of decimal places to use when encoding
            floating point values.
        force_ascii : bool, default True
            Force encoded string to be ASCII.
        date_unit : str, default 'ms' (milliseconds)
            The time unit to encode to, governs timestamp and ISO8601
            precision.  One of 's', 'ms', 'us', 'ns' for second, millisecond,
            microsecond, and nanosecond respectively.
        default_handler : callable, default None
            Handler to call if object cannot otherwise be converted to a
            suitable format for JSON. Should receive a single argument which is
            the object to convert and return a serialisable object.
        lines : bool, default False
            If 'orient' is 'records' write out line-delimited json format. Will
            throw ValueError if incorrect 'orient' since others are not
            list-like.
        compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}
            A string representing the compression to use in the output file,
            only used when the first argument is a filename. By default, the
            compression is inferred from the filename.
        index : bool, default True
            Whether to include the index values in the JSON string. Not
            including the index (``index=False``) is only supported when
            orient is 'split' or 'table'.
        indent : int, optional
           Length of whitespace used to indent each record.

           .. versionadded:: 1.0.0

        {storage_options}

            .. versionadded:: 1.2.0

        Returns
        -------
        None or str
            If path_or_buf is None, returns the resulting json format as a
            string. Otherwise returns None.

        See Also
        --------
        read_json : Convert a JSON string to pandas object.

        Notes
        -----
        The behavior of ``indent=0`` varies from the stdlib, which does not
        indent the output but does insert newlines. Currently, ``indent=0``
        and the default ``indent=None`` are equivalent in pandas, though this
        may change in a future release.

        ``orient='table'`` contains a 'pandas_version' field under 'schema'.
        This stores the version of `pandas` used in the latest revision of the
        schema.

        Examples
        --------
        >>> import json
        >>> df = pd.DataFrame(
        ...     [["a", "b"], ["c", "d"]],
        ...     index=["row 1", "row 2"],
        ...     columns=["col 1", "col 2"],
        ... )

        >>> result = df.to_json(orient="split")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        {{
            "columns": [
                "col 1",
                "col 2"
            ],
            "index": [
                "row 1",
                "row 2"
            ],
            "data": [
                [
                    "a",
                    "b"
                ],
                [
                    "c",
                    "d"
                ]
            ]
        }}

        Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
        Note that index labels are not preserved with this encoding.

        >>> result = df.to_json(orient="records")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        [
            {{
                "col 1": "a",
                "col 2": "b"
            }},
            {{
                "col 1": "c",
                "col 2": "d"
            }}
        ]

        Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

        >>> result = df.to_json(orient="index")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        {{
            "row 1": {{
                "col 1": "a",
                "col 2": "b"
            }},
            "row 2": {{
                "col 1": "c",
                "col 2": "d"
            }}
        }}

        Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:

        >>> result = df.to_json(orient="columns")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        {{
            "col 1": {{
                "row 1": "a",
                "row 2": "c"
            }},
            "col 2": {{
                "row 1": "b",
                "row 2": "d"
            }}
        }}

        Encoding/decoding a Dataframe using ``'values'`` formatted JSON:

        >>> result = df.to_json(orient="values")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        [
            [
                "a",
                "b"
            ],
            [
                "c",
                "d"
            ]
        ]

        Encoding with Table Schema:

        >>> result = df.to_json(orient="table")
        >>> parsed = json.loads(result)
        >>> json.dumps(parsed, indent=4)  # doctest: +SKIP
        {{
            "schema": {{
                "fields": [
                    {{
                        "name": "index",
                        "type": "string"
                    }},
                    {{
                        "name": "col 1",
                        "type": "string"
                    }},
                    {{
                        "name": "col 2",
                        "type": "string"
                    }}
                ],
                "primaryKey": [
                    "index"
                ],
                "pandas_version": "0.20.0"
            }},
            "data": [
                {{
                    "index": "row 1",
                    "col 1": "a",
                    "col 2": "b"
                }},
                {{
                    "index": "row 2",
                    "col 1": "c",
                    "col 2": "d"
                }}
            ]
        }}
        """
        from pandas.io import json
        # 'table' orient defaults to ISO dates (required by the table
        # schema); everything else defaults to epoch timestamps.
        if date_format is None and orient == "table":
            date_format = "iso"
        elif date_format is None:
            date_format = "epoch"
        # Validate ``indent`` before coercing None to 0.
        config.is_nonnegative_int(indent)
        indent = indent or 0
        return json.to_json(
            path_or_buf=path_or_buf,
            obj=self,
            orient=orient,
            date_format=date_format,
            double_precision=double_precision,
            force_ascii=force_ascii,
            date_unit=date_unit,
            default_handler=default_handler,
            lines=lines,
            compression=compression,
            index=index,
            indent=indent,
            storage_options=storage_options,
        )
    @final
    def to_hdf(
        self,
        path_or_buf,
        key: str,
        mode: str = "a",
        complevel: int | None = None,
        complib: str | None = None,
        append: bool_t = False,
        format: str | None = None,
        index: bool_t = True,
        min_itemsize: int | dict[str, int] | None = None,
        nan_rep=None,
        dropna: bool_t | None = None,
        data_columns: bool_t | list[str] | None = None,
        errors: str = "strict",
        encoding: str = "UTF-8",
    ) -> None:
        """
        Write the contained data to an HDF5 file using HDFStore.

        Hierarchical Data Format (HDF) is self-describing, allowing an
        application to interpret the structure and contents of a file with
        no outside information. One HDF file can hold a mix of related objects
        which can be accessed as a group or as individual objects.

        In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different a key.

        .. warning::

           One can store a subclass of ``DataFrame`` or ``Series`` to HDF5,
           but the type of the subclass is lost upon storing.

        For more information see the :ref:`user guide <io.hdf5>`.

        Parameters
        ----------
        path_or_buf : str or pandas.HDFStore
            File path or HDFStore object.
        key : str
            Identifier for the group in the store.
        mode : {'a', 'w', 'r+'}, default 'a'
            Mode to open file:

            - 'w': write, a new file is created (an existing file with
              the same name would be deleted).
            - 'a': append, an existing file is opened for reading and
              writing, and if the file does not exist it is created.
            - 'r+': similar to 'a', but the file must already exist.
        complevel : {0-9}, optional
            Specifies a compression level for data.
            A value of 0 disables compression.
        complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
            Specifies the compression library to be used.
            As of v0.20.2 these additional compressors for Blosc are supported
            (default if no compressor specified: 'blosc:blosclz'):
            {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
            'blosc:zlib', 'blosc:zstd'}.
            Specifying a compression library which is not available issues
            a ValueError.
        append : bool, default False
            For Table formats, append the input data to the existing.
        format : {'fixed', 'table', None}, default 'fixed'
            Possible values:

            - 'fixed': Fixed format. Fast writing/reading. Not-appendable,
              nor searchable.
            - 'table': Table format. Write as a PyTables Table structure
              which may perform worse but allow more flexible operations
              like searching / selecting subsets of the data.
            - If None, pd.get_option('io.hdf.default_format') is checked,
              followed by fallback to "fixed"
        errors : str, default 'strict'
            Specifies how encoding and decoding errors are to be handled.
            See the errors argument for :func:`open` for a full list
            of options.
        encoding : str, default "UTF-8"
        min_itemsize : dict or int, optional
            Map column names to minimum string sizes for columns.
        nan_rep : Any, optional
            How to represent null values as str.
            Not allowed with append=True.
        data_columns : list of columns or True, optional
            List of columns to create as indexed data columns for on-disk
            queries, or True to use all columns. By default only the axes
            of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
            Applicable only to format='table'.

        See Also
        --------
        read_hdf : Read from HDF file.
        DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
        DataFrame.to_sql : Write to a SQL table.
        DataFrame.to_feather : Write out feather-format for DataFrames.
        DataFrame.to_csv : Write out to a csv file.

        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
        ...                   index=['a', 'b', 'c'])
        >>> df.to_hdf('data.h5', key='df', mode='w')

        We can add another object to the same file:

        >>> s = pd.Series([1, 2, 3, 4])
        >>> s.to_hdf('data.h5', key='s')

        Reading from HDF file:

        >>> pd.read_hdf('data.h5', 'df')
        A  B
        a  1  4
        b  2  5
        c  3  6
        >>> pd.read_hdf('data.h5', 's')
        0    1
        1    2
        2    3
        3    4
        dtype: int64

        Deleting file with data:

        >>> import os
        >>> os.remove('data.h5')
        """
        from pandas.io import pytables
        # All real work (validation, format resolution, writing) is
        # delegated to pandas.io.pytables.
        pytables.to_hdf(
            path_or_buf,
            key,
            self,
            mode=mode,
            complevel=complevel,
            complib=complib,
            append=append,
            format=format,
            index=index,
            min_itemsize=min_itemsize,
            nan_rep=nan_rep,
            dropna=dropna,
            data_columns=data_columns,
            errors=errors,
            encoding=encoding,
        )
    @final
    def to_sql(
        self,
        name: str,
        con,
        schema=None,
        if_exists: str = "fail",
        index: bool_t = True,
        index_label=None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
    ) -> None:
        """
        Write records stored in a DataFrame to a SQL database.

        Databases supported by SQLAlchemy [1]_ are supported. Tables can be
        newly created, appended to, or overwritten.

        Parameters
        ----------
        name : str
            Name of SQL table.
        con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection
            Using SQLAlchemy makes it possible to use any DB supported by that
            library. Legacy support is provided for sqlite3.Connection objects. The user
            is responsible for engine disposal and connection closure for the SQLAlchemy
            connectable See `here \
                <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.

        schema : str, optional
            Specify the schema (if database flavor supports this). If None, use
            default schema.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            How to behave if the table already exists.

            * fail: Raise a ValueError.
            * replace: Drop the table before inserting new values.
            * append: Insert new values to the existing table.

        index : bool, default True
            Write DataFrame index as a column. Uses `index_label` as the column
            name in the table.
        index_label : str or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        chunksize : int, optional
            Specify the number of rows in each batch to be written at a time.
            By default, all rows will be written at once.
        dtype : dict or scalar, optional
            Specifying the datatype for columns. If a dictionary is used, the
            keys should be the column names and the values should be the
            SQLAlchemy types or strings for the sqlite3 legacy mode. If a
            scalar is provided, it will be applied to all columns.
        method : {None, 'multi', callable}, optional
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.

        Raises
        ------
        ValueError
            When the table already exists and `if_exists` is 'fail' (the
            default).

        See Also
        --------
        read_sql : Read a DataFrame from a table.

        Notes
        -----
        Timezone aware datetime columns will be written as
        ``Timestamp with timezone`` type with SQLAlchemy if supported by the
        database. Otherwise, the datetimes will be stored as timezone unaware
        timestamps local to the original timezone.

        References
        ----------
        .. [1] https://docs.sqlalchemy.org
        .. [2] https://www.python.org/dev/peps/pep-0249/

        Examples
        --------
        Create an in-memory SQLite database.

        >>> from sqlalchemy import create_engine
        >>> engine = create_engine('sqlite://', echo=False)

        Create a table from scratch with 3 rows.

        >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
        >>> df
             name
        0  User 1
        1  User 2
        2  User 3

        >>> df.to_sql('users', con=engine)
        >>> engine.execute("SELECT * FROM users").fetchall()
        [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]

        An `sqlalchemy.engine.Connection` can also be passed to `con`:

        >>> with engine.begin() as connection:
        ...     df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
        ...     df1.to_sql('users', con=connection, if_exists='append')

        This is allowed to support operations that require that the same
        DBAPI connection is used for the entire operation.

        >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
        >>> df2.to_sql('users', con=engine, if_exists='append')
        >>> engine.execute("SELECT * FROM users").fetchall()
        [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
         (0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
         (1, 'User 7')]

        Overwrite the table with just ``df2``.

        >>> df2.to_sql('users', con=engine, if_exists='replace',
        ...            index_label='id')
        >>> engine.execute("SELECT * FROM users").fetchall()
        [(0, 'User 6'), (1, 'User 7')]

        Specify the dtype (especially useful for integers with missing values).
        Notice that while pandas is forced to store the data as floating point,
        the database supports nullable integers. When fetching the data with
        Python, we get back integer scalars.

        >>> df = pd.DataFrame({"A": [1, None, 2]})
        >>> df
             A
        0  1.0
        1  NaN
        2  2.0

        >>> from sqlalchemy.types import Integer
        >>> df.to_sql('integers', con=engine, index=False,
        ...           dtype={"A": Integer()})

        >>> engine.execute("SELECT * FROM integers").fetchall()
        [(1,), (None,), (2,)]
        """
        from pandas.io import sql
        # Delegates entirely to pandas.io.sql, which dispatches between the
        # SQLAlchemy and sqlite3-legacy code paths based on ``con``.
        sql.to_sql(
            self,
            name,
            con,
            schema=schema,
            if_exists=if_exists,
            index=index,
            index_label=index_label,
            chunksize=chunksize,
            dtype=dtype,
            method=method,
        )
    @final
    @doc(storage_options=_shared_docs["storage_options"])
    def to_pickle(
        self,
        path,
        compression: CompressionOptions = "infer",
        protocol: int = pickle.HIGHEST_PROTOCOL,
        storage_options: StorageOptions = None,
    ) -> None:
        """
        Pickle (serialize) object to file.

        Parameters
        ----------
        path : str
            File path where the pickled object will be stored.
        compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, \
        default 'infer'
            A string representing the compression to use in the output file. By
            default, infers from the file extension in specified path.
            Compression mode may be any of the following possible
            values: {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}. If compression
            mode is 'infer' and path_or_buf is path-like, then detect
            compression mode from the following extensions:
            '.gz', '.bz2', '.zip' or '.xz'. (otherwise no compression).
            If dict given and mode is 'zip' or inferred as 'zip', other entries
            passed as additional compression options.
        protocol : int
            Int which indicates which protocol should be used by the pickler,
            default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
            values are 0, 1, 2, 3, 4, 5. A negative value for the protocol
            parameter is equivalent to setting its value to HIGHEST_PROTOCOL.

            .. [1] https://docs.python.org/3/library/pickle.html.

        {storage_options}

            .. versionadded:: 1.2.0

        See Also
        --------
        read_pickle : Load pickled pandas object (or any object) from file.
        DataFrame.to_hdf : Write DataFrame to an HDF5 file.
        DataFrame.to_sql : Write DataFrame to a SQL database.
        DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

        Examples
        --------
        >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}})
        >>> original_df
           foo  bar
        0    0    5
        1    1    6
        2    2    7
        3    3    8
        4    4    9
        >>> original_df.to_pickle("./dummy.pkl")

        >>> unpickled_df = pd.read_pickle("./dummy.pkl")
        >>> unpickled_df
           foo  bar
        0    0    5
        1    1    6
        2    2    7
        3    3    8
        4    4    9

        >>> import os
        >>> os.remove("./dummy.pkl")
        """
        from pandas.io.pickle import to_pickle
        # Thin wrapper around pandas.io.pickle.to_pickle.
        to_pickle(
            self,
            path,
            compression=compression,
            protocol=protocol,
            storage_options=storage_options,
        )
    @final
    def to_clipboard(
        self, excel: bool_t = True, sep: str | None = None, **kwargs
    ) -> None:
        r"""
        Copy object to the system clipboard.

        Write a text representation of object to the system clipboard.
        This can be pasted into Excel, for example.

        Parameters
        ----------
        excel : bool, default True
            Produce output in a csv format for easy pasting into excel.

            - True, use the provided separator for csv pasting.
            - False, write a string representation of the object to the clipboard.

        sep : str, default ``'\t'``
            Field delimiter.
        **kwargs
            These parameters will be passed to DataFrame.to_csv.

        See Also
        --------
        DataFrame.to_csv : Write a DataFrame to a comma-separated values
            (csv) file.
        read_clipboard : Read text from clipboard and pass to read_table.

        Notes
        -----
        Requirements for your platform.

          - Linux : `xclip`, or `xsel` (with `PyQt4` modules)
          - Windows : none
          - OS X : none

        Examples
        --------
        Copy the contents of a DataFrame to the clipboard.

        >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])

        >>> df.to_clipboard(sep=',')  # doctest: +SKIP
        ... # Wrote the following to the system clipboard:
        ... # ,A,B,C
        ... # 0,1,2,3
        ... # 1,4,5,6

        We can omit the index by passing the keyword `index` and setting
        it to false.

        >>> df.to_clipboard(sep=',', index=False)  # doctest: +SKIP
        ... # Wrote the following to the system clipboard:
        ... # A,B,C
        ... # 1,2,3
        ... # 4,5,6
        """
        from pandas.io import clipboards
        # Thin wrapper around pandas.io.clipboards.to_clipboard.
        clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
@final
def to_xarray(self):
    """
    Return an xarray object from the pandas object.

    Returns
    -------
    xarray.DataArray or xarray.Dataset
        Data in the pandas structure converted to Dataset if the object is
        a DataFrame, or a DataArray if the object is a Series.

    See Also
    --------
    DataFrame.to_hdf : Write DataFrame to an HDF5 file.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

    Notes
    -----
    See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
    """
    # xarray is an optional dependency; this raises a helpful ImportError
    # when it is not installed.
    xarray = import_optional_dependency("xarray")

    # 1-dim (Series) converts to a DataArray, 2-dim (DataFrame) to a Dataset.
    if self.ndim == 1:
        return xarray.DataArray.from_series(self)
    return xarray.Dataset.from_dataframe(self)
@final
@doc(returns=fmt.return_docstring)
def to_latex(
    self,
    buf=None,
    columns=None,
    col_space=None,
    header=True,
    index=True,
    na_rep="NaN",
    formatters=None,
    float_format=None,
    sparsify=None,
    index_names=True,
    bold_rows=False,
    column_format=None,
    longtable=None,
    escape=None,
    encoding=None,
    decimal=".",
    multicolumn=None,
    multicolumn_format=None,
    multirow=None,
    caption=None,
    label=None,
    position=None,
):
    r"""
    Render object to a LaTeX tabular, longtable, or nested table/tabular.

    Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted
    into a main LaTeX document or read from an external file
    with ``\input{{table.tex}}``.

    .. versionchanged:: 1.0.0
       Added caption and label arguments.

    .. versionchanged:: 1.2.0
       Added position argument, changed meaning of caption argument.

    Parameters
    ----------
    buf : str, Path or StringIO-like, optional, default None
        Buffer to write to. If None, the output is returned as a string.
    columns : list of label, optional
        The subset of columns to write. Writes all columns by default.
    col_space : int, optional
        The minimum width of each column.
    header : bool or list of str, default True
        Write out the column names. If a list of strings is given,
        it is assumed to be aliases for the column names.
    index : bool, default True
        Write row names (index).
    na_rep : str, default 'NaN'
        Missing data representation.
    formatters : list of functions or dict of {{str: function}}, optional
        Formatter functions to apply to columns' elements by position or
        name. The result of each function must be a unicode string.
        List must be of length equal to the number of columns.
    float_format : one-parameter function or str, optional, default None
        Formatter for floating point numbers.
    sparsify : bool, optional
        Set to False for a DataFrame with a hierarchical index to print
        every multiindex key at each row. By default, the value will be
        read from the config module.
    index_names : bool, default True
        Prints the names of the indexes.
    bold_rows : bool, default False
        Make the row labels bold in the output.
    column_format : str, optional
        The columns format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
        columns. By default, 'l' will be used for all columns except
        columns of numbers, which default to 'r'.
    longtable : bool, optional
        Use a longtable environment instead of tabular. Requires
        adding a \usepackage{{longtable}} to your LaTeX preamble.
        By default, the value will be read from the pandas config module.
    escape : bool, optional
        When set to False prevents from escaping latex special
        characters in column names. By default, the value will be read
        from the pandas config module.
    encoding : str, optional
        A string representing the encoding to use in the output file,
        defaults to 'utf-8'.
    decimal : str, default '.'
        Character recognized as decimal separator, e.g. ',' in Europe.
    multicolumn : bool, default True
        Use \multicolumn to enhance MultiIndex columns.
        The default will be read from the config module.
    multicolumn_format : str, default 'l'
        The alignment for multicolumns, similar to `column_format`.
        The default will be read from the config module.
    multirow : bool, default False
        Use \multirow to enhance MultiIndex rows. Requires adding a
        \usepackage{{multirow}} to your LaTeX preamble. The default will
        be read from the pandas config module.
    caption : str or tuple, optional
        Tuple (full_caption, short_caption); if a single string is
        passed, no short caption will be set.

        .. versionadded:: 1.0.0

        .. versionchanged:: 1.2.0
           Optionally allow caption to be a tuple ``(full_caption, short_caption)``.
    label : str, optional
        The LaTeX label to be placed inside ``\label{{}}`` in the output.
        This is used with ``\ref{{}}`` in the main ``.tex`` file.

        .. versionadded:: 1.0.0
    position : str, optional
        The LaTeX positional argument for tables, to be placed after
        ``\begin{{}}`` in the output.

        .. versionadded:: 1.2.0
    {returns}
    See Also
    --------
    DataFrame.to_string : Render a DataFrame to a console-friendly
        tabular output.
    DataFrame.to_html : Render a DataFrame as an HTML table.
    """
    # A Series is rendered through a one-column frame.
    if self.ndim == 1:
        self = self.to_frame()

    def _config_default(value, option: str):
        # Fall back to the pandas config for options left as None.
        return config.get_option(option) if value is None else value

    longtable = _config_default(longtable, "display.latex.longtable")
    escape = _config_default(escape, "display.latex.escape")
    multicolumn = _config_default(multicolumn, "display.latex.multicolumn")
    multicolumn_format = _config_default(
        multicolumn_format, "display.latex.multicolumn_format"
    )
    multirow = _config_default(multirow, "display.latex.multirow")

    self = cast("DataFrame", self)
    formatter = DataFrameFormatter(
        self,
        columns=columns,
        col_space=col_space,
        na_rep=na_rep,
        header=header,
        index=index,
        formatters=formatters,
        float_format=float_format,
        bold_rows=bold_rows,
        sparsify=sparsify,
        index_names=index_names,
        escape=escape,
        decimal=decimal,
    )
    return DataFrameRenderer(formatter).to_latex(
        buf=buf,
        column_format=column_format,
        longtable=longtable,
        encoding=encoding,
        multicolumn=multicolumn,
        multicolumn_format=multicolumn_format,
        multirow=multirow,
        caption=caption,
        label=label,
        position=position,
    )
@final
@doc(storage_options=_shared_docs["storage_options"])
def to_csv(
    self,
    path_or_buf: FilePathOrBuffer[AnyStr] | None = None,
    sep: str = ",",
    na_rep: str = "",
    float_format: str | None = None,
    columns: Sequence[Hashable] | None = None,
    header: bool_t | list[str] = True,
    index: bool_t = True,
    index_label: IndexLabel | None = None,
    mode: str = "w",
    encoding: str | None = None,
    compression: CompressionOptions = "infer",
    quoting: int | None = None,
    quotechar: str = '"',
    line_terminator: str | None = None,
    chunksize: int | None = None,
    date_format: str | None = None,
    doublequote: bool_t = True,
    escapechar: str | None = None,
    decimal: str = ".",
    errors: str = "strict",
    storage_options: StorageOptions = None,
) -> str | None:
    r"""
    Write object to a comma-separated values (csv) file.

    Parameters
    ----------
    path_or_buf : str or file handle, default None
        File path or object, if None is provided the result is returned as
        a string. If a non-binary file object is passed, it should be opened
        with `newline=''`, disabling universal newlines. If a binary
        file object is passed, `mode` might need to contain a `'b'`.

        .. versionchanged:: 1.2.0
           Support for binary file objects was introduced.
    sep : str, default ','
        String of length 1. Field delimiter for the output file.
    na_rep : str, default ''
        Missing data representation.
    float_format : str, default None
        Format string for floating point numbers.
    columns : sequence, optional
        Columns to write.
    header : bool or list of str, default True
        Write out the column names. If a list of strings is given it is
        assumed to be aliases for the column names.
    index : bool, default True
        Write row names (index).
    index_label : str or sequence, or False, default None
        Column label for index column(s) if desired. If None is given, and
        `header` and `index` are True, then the index names are used. A
        sequence should be given if the object uses MultiIndex. If
        False do not print fields for index names.
    mode : str
        Python write mode, default 'w'.
    encoding : str, optional
        A string representing the encoding to use in the output file,
        defaults to 'utf-8'. `encoding` is not supported if `path_or_buf`
        is a non-binary file object.
    compression : str or dict, default 'infer'
        If str, represents compression mode. If dict, value at 'method' is
        the compression mode ({{'infer', 'gzip', 'bz2', 'zip', 'xz', None}})
        and other entries are passed as additional compression options.
        If 'infer' and `path_or_buf` is path-like, the mode is detected
        from the extension: '.gz', '.bz2', '.zip' or '.xz'.
    quoting : optional constant from csv module
        Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
        then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
        will treat them as non-numeric.
    quotechar : str, default '\"'
        String of length 1. Character used to quote fields.
    line_terminator : str, optional
        The newline character or character sequence to use in the output
        file. Defaults to `os.linesep`, which depends on the OS in which
        this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).
    chunksize : int or None
        Rows to write at a time.
    date_format : str, default None
        Format string for datetime objects.
    doublequote : bool, default True
        Control quoting of `quotechar` inside a field.
    escapechar : str, default None
        String of length 1. Character used to escape `sep` and `quotechar`
        when appropriate.
    decimal : str, default '.'
        Character recognized as decimal separator. E.g. use ',' for
        European data.
    errors : str, default 'strict'
        Specifies how encoding and decoding errors are to be handled.
        See the errors argument for :func:`open` for a full list
        of options.

        .. versionadded:: 1.1.0

    {storage_options}

        .. versionadded:: 1.2.0

    Returns
    -------
    None or str
        If path_or_buf is None, returns the resulting csv format as a
        string. Otherwise returns None.

    See Also
    --------
    read_csv : Load a CSV file into a DataFrame.
    to_excel : Write DataFrame to an Excel file.
    """
    # Both Series and DataFrame funnel through the DataFrame formatter;
    # a Series is converted to a one-column frame first.
    if isinstance(self, ABCDataFrame):
        frame = self
    else:
        frame = self.to_frame()

    formatter = DataFrameFormatter(
        frame=frame,
        header=header,
        index=index,
        na_rep=na_rep,
        float_format=float_format,
        decimal=decimal,
    )

    return DataFrameRenderer(formatter).to_csv(
        path_or_buf,
        line_terminator=line_terminator,
        sep=sep,
        encoding=encoding,
        errors=errors,
        compression=compression,
        quoting=quoting,
        columns=columns,
        index_label=index_label,
        mode=mode,
        chunksize=chunksize,
        quotechar=quotechar,
        date_format=date_format,
        doublequote=doublequote,
        escapechar=escapechar,
        storage_options=storage_options,
    )
# ----------------------------------------------------------------------
# Lookup Caching
def _reset_cacher(self) -> None:
    """
    Reset the cacher.

    Abstract hook: must be implemented by subclasses (raises
    AbstractMethodError here).
    """
    raise AbstractMethodError(self)
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referent")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
    # Abstract hook: subclasses implement dropping their cached items.
    raise AbstractMethodError(self)
# ----------------------------------------------------------------------
# Indexing Methods
def take(
    self: FrameOrSeries, indices, axis=0, is_copy: bool_t | None = None, **kwargs
) -> FrameOrSeries:
    """
    Return the elements in the given *positional* indices along an axis.

    This means that we are not indexing according to actual values in
    the index attribute of the object. We are indexing according to the
    actual position of the element in the object.

    Parameters
    ----------
    indices : array-like
        An array of ints indicating which positions to take.
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        The axis on which to select elements. ``0`` means that we are
        selecting rows, ``1`` means that we are selecting columns.
    is_copy : bool
        Before pandas 1.0, ``is_copy=False`` can be specified to ensure
        that the return value is an actual copy. Starting with pandas 1.0,
        ``take`` always returns a copy, and the keyword is therefore
        deprecated.

        .. deprecated:: 1.0.0
    **kwargs
        For compatibility with :meth:`numpy.take`. Has no effect on the
        output.

    Returns
    -------
    taken : same type as caller
        An array-like containing the elements taken from the object.

    See Also
    --------
    DataFrame.loc : Select a subset of a DataFrame by labels.
    DataFrame.iloc : Select a subset of a DataFrame by positions.
    numpy.take : Take elements from an array along an axis.
    """
    if is_copy is not None:
        # Keyword kept only for backwards compatibility; warn on any use.
        warnings.warn(
            "is_copy is deprecated and will be removed in a future version. "
            "'take' always returns a copy, so there is no need to specify this.",
            FutureWarning,
            stacklevel=2,
        )

    # Reject any numpy-compat kwargs other than the defaults.
    nv.validate_take((), kwargs)

    self._consolidate_inplace()

    bm_axis = self._get_block_manager_axis(axis)
    taken = self._mgr.take(indices, axis=bm_axis, verify=True)
    return self._constructor(taken).__finalize__(self, method="take")
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
@final
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
    """
    Return cross-section from the Series/DataFrame.

    This method takes a `key` argument to select data at a particular
    level of a MultiIndex.

    Parameters
    ----------
    key : label or tuple of label
        Label contained in the index, or partially in a MultiIndex.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis to retrieve cross-section on.
    level : object, defaults to first n levels (n=1 or len(key))
        In case of a key partially contained in a MultiIndex, indicate
        which levels are used. Levels can be referred by label or position.
    drop_level : bool, default True
        If False, returns object with same levels as self.

    Returns
    -------
    Series or DataFrame
        Cross-section from the original Series or DataFrame
        corresponding to the selected index levels.

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns
        by label(s) or a boolean array.
    DataFrame.iloc : Purely integer-location based indexing
        for selection by position.

    Notes
    -----
    `xs` can not be used to set values.

    MultiIndex Slicers is a generic way to get/set values on
    any level or levels.
    It is a superset of `xs` functionality, see
    :ref:`MultiIndex Slicers <advanced.mi_slicers>`.

    Examples
    --------
    >>> d = {'num_legs': [4, 4, 2, 2],
    ...      'num_wings': [0, 0, 2, 2],
    ...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
    ...      'animal': ['cat', 'dog', 'bat', 'penguin'],
    ...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
    >>> df = pd.DataFrame(data=d)
    >>> df = df.set_index(['class', 'animal', 'locomotion'])
    >>> df
                               num_legs  num_wings
    class  animal  locomotion
    mammal cat     walks              4          0
           dog     walks              4          0
           bat     flies              2          2
    bird   penguin walks              2          2

    Get values at specified index

    >>> df.xs('mammal')
                       num_legs  num_wings
    animal locomotion
    cat    walks              4          0
    dog    walks              4          0
    bat    flies              2          2

    Get values at several indexes

    >>> df.xs(('mammal', 'dog'))
                num_legs  num_wings
    locomotion
    walks              4          0

    Get values at specified index and level

    >>> df.xs('cat', level=1)
                       num_legs  num_wings
    class  locomotion
    mammal walks              4          0

    Get values at several indexes and levels

    >>> df.xs(('bird', 'walks'),
    ...       level=[0, 'locomotion'])
             num_legs  num_wings
    animal
    penguin         2          2

    Get values at specified column and axis

    >>> df.xs('num_wings', axis=1)
    class   animal   locomotion
    mammal  cat      walks         0
            dog      walks         0
            bat      flies         2
    bird    penguin  walks         2
    Name: num_wings, dtype: int64
    """
    axis = self._get_axis_number(axis)
    labels = self._get_axis(axis)

    # Lists as keys are ambiguous (label-list vs. MultiIndex tuple) and
    # are being phased out in favor of tuples.
    if isinstance(key, list):
        warnings.warn(
            "Passing lists as key for xs is deprecated and will be removed in a "
            "future version. Pass key as a tuple instead.",
            FutureWarning,
            stacklevel=2,
        )

    if level is not None:
        # Explicit-level path: only meaningful for a MultiIndex.
        if not isinstance(labels, MultiIndex):
            raise TypeError("Index must be a MultiIndex")
        loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)

        # create the tuple of the indexer
        _indexer = [slice(None)] * self.ndim
        _indexer[axis] = loc
        indexer = tuple(_indexer)

        result = self.iloc[indexer]
        # Re-attach the (possibly level-dropped) axis returned by
        # get_loc_level on the selected axis.
        setattr(result, result._get_axis_name(axis), new_ax)
        return result

    if axis == 1:
        if drop_level:
            # Column selection with drop_level is plain __getitem__.
            return self[key]
        index = self.columns
    else:
        index = self.index

    self._consolidate_inplace()

    if isinstance(index, MultiIndex):
        loc, new_index = index._get_loc_level(key, level=0)
        if not drop_level:
            if lib.is_integer(loc):
                # Keep a length-1 slice so the level structure survives.
                new_index = index[loc : loc + 1]
            else:
                new_index = index[loc]
    else:
        loc = index.get_loc(key)

        if isinstance(loc, np.ndarray):
            # get_loc returned an indexer array (non-unique index case).
            if loc.dtype == np.bool_:
                (inds,) = loc.nonzero()
                return self._take_with_is_copy(inds, axis=axis)
            else:
                return self._take_with_is_copy(loc, axis=axis)

        if not is_scalar(loc):
            new_index = index[loc]

    if is_scalar(loc) and axis == 0:
        # In this case loc should be an integer
        if self.ndim == 1:
            # if we encounter an array-like and we only have 1 dim
            # that means that their are list/ndarrays inside the Series!
            # so just return them (GH 6394)
            return self._values[loc]

        new_values = self._mgr.fast_xs(loc)

        result = self._constructor_sliced(
            new_values,
            index=self.columns,
            name=self.index[loc],
            dtype=new_values.dtype,
        )
    elif is_scalar(loc):
        result = self.iloc[:, slice(loc, loc + 1)]
    elif axis == 1:
        result = self.iloc[:, loc]
    else:
        result = self.iloc[loc]
    result.index = new_index

    # this could be a view
    # but only in a single-dtyped view sliceable case
    result._set_is_copy(self, copy=not result._is_view)
    return result
def __getitem__(self, item):
    # Abstract hook: indexing is implemented by the concrete subclasses.
    raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._mgr.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
@final
def _set_is_copy(self, ref: FrameOrSeries, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just near setting a value
Will return a boolean if it we are a view and are cached, but a
single-dtype meaning that the cacher should be updated following
setting.
"""
if self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referent")
return False
@final
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
    """
    Validate if we are doing a setitem on a chained copy.

    Parameters
    ----------
    stacklevel : int, default 4
        the level to show of the stack when the error is output
    t : str, the type of setting error
    force : bool, default False
        If True, then force showing an error.

    Notes
    -----
    If you call this function, be sure to set the stacklevel such that the
    user will see the error *at the level of setting*.

    It is technically possible to figure out that we are setting on
    a copy even WITH a multi-dtyped pandas object. In other words, some
    blocks may be views while other are not. Currently _is_view will ALWAYS
    return False for multi-blocks to avoid having to handle this case.

    df = DataFrame(np.arange(0,9), columns=['count'])
    df['group'] = 'b'

    # This technically need not raise SettingWithCopy if both are view
    # (which is not # generally guaranteed but is usually True. However,
    # this is in general not a good practice and we recommend using .loc.
    df.iloc[0:5]['group'] = 'a'
    """
    # return early if the check is not needed
    if not (force or self._is_copy):
        return

    value = config.get_option("mode.chained_assignment")
    if value is None:
        # Checks entirely disabled via the config option.
        return

    # see if the copy is not actually referred; if so, then dissolve
    # the copy weakref
    if self._is_copy is not None and not isinstance(self._is_copy, str):
        # _is_copy holds a weakref here; call it to get the referent.
        r = self._is_copy()
        if not gc.get_referents(r) or (r is not None and r.shape == self.shape):
            self._is_copy = None
            return

    # a custom message
    if isinstance(self._is_copy, str):
        t = self._is_copy
    elif t == "referent":
        t = (
            "\n"
            "A value is trying to be set on a copy of a slice from a "
            "DataFrame\n\n"
            "See the caveats in the documentation: "
            "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
            "indexing.html#returning-a-view-versus-a-copy"
        )
    else:
        t = (
            "\n"
            "A value is trying to be set on a copy of a slice from a "
            "DataFrame.\n"
            "Try using .loc[row_indexer,col_indexer] = value "
            "instead\n\nSee the caveats in the documentation: "
            "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
            "indexing.html#returning-a-view-versus-a-copy"
        )

    # "mode.chained_assignment" decides whether to raise or merely warn.
    if value == "raise":
        raise com.SettingWithCopyError(t)
    elif value == "warn":
        warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
    """
    Delete item
    """
    deleted = False

    maybe_shortcut = False
    if self.ndim == 2 and isinstance(self.columns, MultiIndex):
        try:
            # Only take the prefix-matching shortcut when the key is not
            # itself a complete column label in the engine.
            maybe_shortcut = key not in self.columns._engine
        except TypeError:
            # Unhashable/unsupported key type; fall through to get_loc below.
            pass

    if maybe_shortcut:
        # Allow shorthand to delete all columns whose first len(key)
        # elements match key:
        if not isinstance(key, tuple):
            key = (key,)
        for col in self.columns:
            if isinstance(col, tuple) and col[: len(key)] == key:
                del self[col]
                deleted = True
    if not deleted:
        # If the above loop ran and didn't delete anything because
        # there was no match, this call should raise the appropriate
        # exception:
        loc = self.axes[-1].get_loc(key)
        self._mgr = self._mgr.idelete(loc)

    # delete from the caches
    try:
        del self._item_cache[key]
    except KeyError:
        pass
# ----------------------------------------------------------------------
# Unsorted
@final
def _check_inplace_and_allows_duplicate_labels(self, inplace):
if inplace and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'inplace=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
@final
def get(self, key, default=None):
    """
    Get item from object for given key (ex: DataFrame column).

    Returns default value if not found.

    Parameters
    ----------
    key : object

    Returns
    -------
    value : same type as items contained in object
    """
    # EAFP: attempt the lookup and translate any of the lookup failure
    # exceptions into the default value.
    try:
        value = self[key]
    except (KeyError, ValueError, IndexError):
        return default
    return value
@final
@property
def _is_view(self) -> bool_t:
    """Return boolean indicating if self is view of another array"""
    # Delegates the view check to the underlying block manager.
    return self._mgr.is_view
@final
def reindex_like(
    self: FrameOrSeries,
    other,
    method: str | None = None,
    copy: bool_t = True,
    limit=None,
    tolerance=None,
) -> FrameOrSeries:
    """
    Return an object with matching indices as other object.

    Conform the object to the same index on all axes. Optional
    filling logic, placing NaN in locations having no value
    in the previous index. A new object is produced unless the
    new index is equivalent to the current one and copy=False.

    Parameters
    ----------
    other : Object of the same data type
        Its row and column indices are used to define the new indices
        of this object.
    method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
        Method to use for filling holes in reindexed DataFrame.
        Please note: this is only applicable to DataFrames/Series with a
        monotonically increasing/decreasing index.

        * None (default): don't fill gaps
        * pad / ffill: propagate last valid observation forward to next
          valid
        * backfill / bfill: use next valid observation to fill gap
        * nearest: use nearest valid observations to fill gap.
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    limit : int, default None
        Maximum number of consecutive labels to fill for inexact matches.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. May be a scalar, or list-like of the same size as the
        index (with a dtype exactly matching the index's type).

    Returns
    -------
    Series or DataFrame
        Same type as caller, but with changed indices on each axis.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex : Change to new indices or expand indices.

    Notes
    -----
    Same as calling
    ``.reindex(index=other.index, columns=other.columns,...)``.
    """
    # Collect the target axes (and fill options) from ``other`` keyed by
    # axis name, then hand everything to the general reindex machinery.
    kwargs = other._construct_axes_dict(
        axes=self._AXIS_ORDERS,
        method=method,
        copy=copy,
        limit=limit,
        tolerance=tolerance,
    )
    return self.reindex(**kwargs)
def drop(
    self,
    labels=None,
    axis=0,
    index=None,
    columns=None,
    level=None,
    inplace: bool_t = False,
    errors: str = "raise",
):
    """
    Drop labels from the requested axis/axes (shared Series/DataFrame
    engine; per-axis labels come either from ``labels``+``axis`` or from
    the ``index``/``columns`` keywords, but not both).
    """
    inplace = validate_bool_kwarg(inplace, "inplace")

    if labels is not None:
        # ``labels`` is mutually exclusive with ``index``/``columns``.
        if index is not None or columns is not None:
            raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
        axes = {self._get_axis_name(axis): labels}
    elif index is not None or columns is not None:
        axes, _ = self._construct_axes_from_arguments((index, columns), {})
    else:
        raise ValueError(
            "Need to specify at least one of 'labels', 'index' or 'columns'"
        )

    obj = self
    for axis_name, axis_labels in axes.items():
        if axis_labels is not None:
            obj = obj._drop_axis(axis_labels, axis_name, level=level, errors=errors)

    if inplace:
        self._update_inplace(obj)
        return None
    return obj
@final
def _drop_axis(
    self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
    """
    Drop labels from specified axis. Used in the ``drop`` method
    internally.

    Parameters
    ----------
    labels : single label or list-like
    axis : int or axis name
    level : int or level name, default None
        For MultiIndex
    errors : {'ignore', 'raise'}, default 'raise'
        If 'ignore', suppress error and existing labels are dropped.
    """
    axis = self._get_axis_number(axis)
    axis_name = self._get_axis_name(axis)
    axis = self._get_axis(axis)

    if axis.is_unique:
        # Fast path: a unique axis can drop labels directly and reindex.
        if level is not None:
            if not isinstance(axis, MultiIndex):
                raise AssertionError("axis must be a MultiIndex")
            new_axis = axis.drop(labels, level=level, errors=errors)
        else:
            new_axis = axis.drop(labels, errors=errors)
        result = self.reindex(**{axis_name: new_axis})

    # Case for non-unique axis
    else:
        # Build a boolean "keep" indexer instead of reindexing.
        labels = ensure_object(com.index_labels_to_array(labels))
        if level is not None:
            if not isinstance(axis, MultiIndex):
                raise AssertionError("axis must be a MultiIndex")
            indexer = ~axis.get_level_values(level).isin(labels)

            # GH 18561 MultiIndex.drop should raise if label is absent
            if errors == "raise" and indexer.all():
                raise KeyError(f"{labels} not found in axis")
        elif isinstance(axis, MultiIndex) and labels.dtype == "object":
            # Set level to zero in case of MultiIndex and label is string,
            # because isin can't handle strings for MultiIndexes GH#36293
            indexer = ~axis.get_level_values(0).isin(labels)
        else:
            indexer = ~axis.isin(labels)
            # Check if label doesn't exist along axis
            labels_missing = (axis.get_indexer_for(labels) == -1).any()
            if errors == "raise" and labels_missing:
                raise KeyError(f"{labels} not found in axis")

        slicer = [slice(None)] * self.ndim
        slicer[self._get_axis_number(axis_name)] = indexer

        result = self.loc[tuple(slicer)]

    return result
@final
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
    """
    Replace self internals with result.

    Parameters
    ----------
    result : same type as self
    verify_is_copy : bool, default True
        Provide is_copy checks.
    """
    # NOTE: This does *not* call __finalize__ and that's an explicit
    # decision that we may revisit in the future.
    # Invalidate all cached state before swapping in the new manager,
    # otherwise stale cached Series/values could be served afterwards.
    self._reset_cache()
    self._clear_item_cache()
    # Adopt ``result``'s block manager wholesale.
    self._mgr = result._mgr
    # Refresh SettingWithCopy bookkeeping for the replaced internals.
    self._maybe_update_cacher(verify_is_copy=verify_is_copy)
@final
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
    """
    Prefix labels with string `prefix`.

    For Series, the row labels are prefixed.
    For DataFrame, the column labels are prefixed.

    Parameters
    ----------
    prefix : str
        The string to add before each label.

    Returns
    -------
    Series or DataFrame
        New Series or DataFrame with updated labels.

    See Also
    --------
    Series.add_suffix: Suffix row labels with string `suffix`.
    DataFrame.add_suffix: Suffix column labels with string `suffix`.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.add_prefix('item_')
    item_0    1
    item_1    2
    item_2    3
    item_3    4
    dtype: int64

    >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
    >>> df
       A  B
    0  1  3
    1  2  4
    2  3  5
    3  4  6

    >>> df.add_prefix('col_')
         col_A  col_B
    0       1       3
    1       2       4
    2       3       5
    3       4       6
    """
    # Prepend ``prefix`` to every label on the info axis (index for a
    # Series, columns for a DataFrame) by handing ``rename`` a callable.
    prepend = functools.partial("{prefix}{}".format, prefix=prefix)
    axis_mapper = {self._info_axis_name: prepend}
    # mypy cannot see that rename accepts a per-axis callable mapping
    # passed this way: it reports an incompatible "**kwargs" argument and
    # an Optional return value, both of which are fine at runtime.
    return self.rename(**axis_mapper)  # type: ignore[return-value, arg-type]
@final
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
    """
    Suffix labels with string `suffix`.

    For Series, the row labels are suffixed.
    For DataFrame, the column labels are suffixed.

    Parameters
    ----------
    suffix : str
        The string to add after each label.

    Returns
    -------
    Series or DataFrame
        New Series or DataFrame with updated labels.

    See Also
    --------
    Series.add_prefix: Prefix row labels with string `prefix`.
    DataFrame.add_prefix: Prefix column labels with string `prefix`.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.add_suffix('_item')
    0_item    1
    1_item    2
    2_item    3
    3_item    4
    dtype: int64

    >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
    >>> df
       A  B
    0  1  3
    1  2  4
    2  3  5
    3  4  6

    >>> df.add_suffix('_col')
         A_col  B_col
    0       1       3
    1       2       4
    2       3       5
    3       4       6
    """
    # Append ``suffix`` to every label on the info axis (index for a
    # Series, columns for a DataFrame) by handing ``rename`` a callable.
    append = functools.partial("{}{suffix}".format, suffix=suffix)
    axis_mapper = {self._info_axis_name: append}
    # mypy cannot see that rename accepts a per-axis callable mapping
    # passed this way: it reports an incompatible "**kwargs" argument and
    # an Optional return value, both of which are fine at runtime.
    return self.rename(**axis_mapper)  # type: ignore[return-value, arg-type]
def sort_values(
    self,
    axis=0,
    ascending=True,
    inplace: bool_t = False,
    kind: str = "quicksort",
    na_position: str = "last",
    ignore_index: bool_t = False,
    key: ValueKeyFunc = None,
):
    """
    Sort by the values along either axis.

    Parameters
    ----------%(optional_by)s
    axis : %(axes_single_arg)s, default 0
         Axis to be sorted.
    ascending : bool or list of bool, default True
         Sort ascending vs. descending. Specify list for multiple sort
         orders.  If this is a list of bools, must match the length of
         the by.
    inplace : bool, default False
         If True, perform operation in-place.
    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
         Choice of sorting algorithm. See also :func:`numpy.sort` for more
         information. `mergesort` and `stable` are the only stable algorithms. For
         DataFrames, this option is only applied when sorting on a single
         column or label.
    na_position : {'first', 'last'}, default 'last'
         Puts NaNs at the beginning if `first`; `last` puts NaNs at the
         end.
    ignore_index : bool, default False
         If True, the resulting axis will be labeled 0, 1, …, n - 1.

         .. versionadded:: 1.0.0

    key : callable, optional
        Apply the key function to the values
        before sorting. This is similar to the `key` argument in the
        builtin :meth:`sorted` function, with the notable difference that
        this `key` function should be *vectorized*. It should expect a
        ``Series`` and return a Series with the same shape as the input.
        It will be applied to each column in `by` independently.

        .. versionadded:: 1.1.0

    Returns
    -------
    DataFrame or None
        DataFrame with sorted values or None if ``inplace=True``.

    See Also
    --------
    DataFrame.sort_index : Sort a DataFrame by the index.
    Series.sort_values : Similar method for a Series.

    Examples
    --------
    >>> df = pd.DataFrame({
    ...     'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
    ...     'col2': [2, 1, 9, 8, 7, 4],
    ...     'col3': [0, 1, 9, 4, 2, 3],
    ...     'col4': ['a', 'B', 'c', 'D', 'e', 'F']
    ... })
    >>> df
      col1  col2  col3 col4
    0    A     2     0    a
    1    A     1     1    B
    2    B     9     9    c
    3  NaN     8     4    D
    4    D     7     2    e
    5    C     4     3    F

    Sort by col1

    >>> df.sort_values(by=['col1'])
      col1  col2  col3 col4
    0    A     2     0    a
    1    A     1     1    B
    2    B     9     9    c
    5    C     4     3    F
    4    D     7     2    e
    3  NaN     8     4    D

    Sort by multiple columns

    >>> df.sort_values(by=['col1', 'col2'])
      col1  col2  col3 col4
    1    A     1     1    B
    0    A     2     0    a
    2    B     9     9    c
    5    C     4     3    F
    4    D     7     2    e
    3  NaN     8     4    D

    Sort Descending

    >>> df.sort_values(by='col1', ascending=False)
      col1  col2  col3 col4
    4    D     7     2    e
    5    C     4     3    F
    2    B     9     9    c
    0    A     2     0    a
    1    A     1     1    B
    3  NaN     8     4    D

    Putting NAs first

    >>> df.sort_values(by='col1', ascending=False, na_position='first')
      col1  col2  col3 col4
    3  NaN     8     4    D
    4    D     7     2    e
    5    C     4     3    F
    2    B     9     9    c
    0    A     2     0    a
    1    A     1     1    B

    Sorting with a key function

    >>> df.sort_values(by='col4', key=lambda col: col.str.lower())
      col1  col2  col3 col4
    0    A     2     0    a
    1    A     1     1    B
    2    B     9     9    c
    3  NaN     8     4    D
    4    D     7     2    e
    5    C     4     3    F

    Natural sort with the key argument,
    using the `natsort <https://github.com/SethMMorton/natsort>` package.

    >>> df = pd.DataFrame({
    ...    "time": ['0hr', '128hr', '72hr', '48hr', '96hr'],
    ...    "value": [10, 20, 30, 40, 50]
    ... })
    >>> df
        time  value
    0    0hr     10
    1  128hr     20
    2   72hr     30
    3   48hr     40
    4   96hr     50
    >>> from natsort import index_natsorted
    >>> df.sort_values(
    ...    by="time",
    ...    key=lambda x: np.argsort(index_natsorted(df["time"]))
    ... )
        time  value
    0    0hr     10
    3   48hr     40
    2   72hr     30
    4   96hr     50
    1  128hr     20
    """
    # Abstract on NDFrame: concrete implementations are provided by the
    # subclasses (the shared docstring above is reused there).
    raise AbstractMethodError(self)
def sort_index(
    self,
    axis=0,
    level=None,
    ascending: bool_t | int | Sequence[bool_t | int] = True,
    inplace: bool_t = False,
    kind: str = "quicksort",
    na_position: str = "last",
    sort_remaining: bool_t = True,
    ignore_index: bool_t = False,
    key: IndexKeyFunc = None,
):
    """
    Sort object by labels along the given axis.

    Shared implementation behind Series/DataFrame ``sort_index``; computes
    a positional indexer for the target axis and takes along it.
    Returns None when ``inplace=True``, otherwise a new sorted object.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    axis = self._get_axis_number(axis)
    ascending = validate_ascending(ascending)

    target = self._get_axis(axis)

    # ``indexer`` is None when the axis is already in the requested order,
    # which lets us short-circuit without touching the data.
    indexer = get_indexer_indexer(
        target, level, ascending, kind, na_position, sort_remaining, key
    )

    if indexer is None:
        if inplace:
            return
        else:
            return self.copy()

    # Translate the user-facing axis to the block manager's axis layout.
    baxis = self._get_block_manager_axis(axis)
    new_data = self._mgr.take(indexer, axis=baxis, verify=False)

    # reconstruct axis if needed
    new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic())

    if ignore_index:
        # NOTE: ``axis`` is reused here for the manager axis to relabel.
        axis = 1 if isinstance(self, ABCDataFrame) else 0
        new_data.set_axis(axis, ibase.default_index(len(indexer)))

    result = self._constructor(new_data)

    if inplace:
        return self._update_inplace(result)
    else:
        return result.__finalize__(self, method="sort_index")
@doc(
    klass=_shared_doc_kwargs["klass"],
    axes=_shared_doc_kwargs["axes"],
    optional_labels="",
    optional_axis="",
)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
    """
    Conform {klass} to new index with optional filling logic.

    Places NA/NaN in locations having no value in the previous index. A new object
    is produced unless the new index is equivalent to the current one and
    ``copy=False``.

    Parameters
    ----------
    {optional_labels}
    {axes} : array-like, optional
        New labels / index to conform to, should be specified using
        keywords. Preferably an Index object to avoid duplicating data.
    {optional_axis}
    method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
        Method to use for filling holes in reindexed DataFrame.
        Please note: this is only applicable to DataFrames/Series with a
        monotonically increasing/decreasing index.

        * None (default): don't fill gaps
        * pad / ffill: Propagate last valid observation forward to next
          valid.
        * backfill / bfill: Use next valid observation to fill gap.
        * nearest: Use nearest valid observations to fill gap.

    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    level : int or name
        Broadcast across a level, matching Index values on the
        passed MultiIndex level.
    fill_value : scalar, default np.NaN
        Value to use for missing values. Defaults to NaN, but can be any
        "compatible" value.
    limit : int, default None
        Maximum number of consecutive elements to forward or backward fill.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. The values of the index at the matching locations most
        satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

        Tolerance may be a scalar value, which applies the same tolerance
        to all values, or list-like, which applies variable tolerance per
        element. List-like includes list, tuple, array, Series, and must be
        the same size as the index and its dtype must exactly match the
        index's type.

    Returns
    -------
    {klass} with changed index.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex_like : Change to same indices as other DataFrame.

    Examples
    --------
    ``DataFrame.reindex`` supports two calling conventions

    * ``(index=index_labels, columns=column_labels, ...)``
    * ``(labels, axis={{'index', 'columns'}}, ...)``

    We *highly* recommend using keyword arguments to clarify your
    intent.

    Create a dataframe with some fictional data.

    >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
    >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301],
    ...                    'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}},
    ...                   index=index)
    >>> df
               http_status  response_time
    Firefox            200           0.04
    Chrome             200           0.02
    Safari             404           0.07
    IE10               404           0.08
    Konqueror          301           1.00

    Create a new index and reindex the dataframe. By default
    values in the new index that do not have corresponding
    records in the dataframe are assigned ``NaN``.

    >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
    ...              'Chrome']
    >>> df.reindex(new_index)
                   http_status  response_time
    Safari               404.0           0.07
    Iceweasel              NaN            NaN
    Comodo Dragon          NaN            NaN
    IE10                 404.0           0.08
    Chrome               200.0           0.02

    We can fill in the missing values by passing a value to
    the keyword ``fill_value``. Because the index is not monotonically
    increasing or decreasing, we cannot use arguments to the keyword
    ``method`` to fill the ``NaN`` values.

    >>> df.reindex(new_index, fill_value=0)
                   http_status  response_time
    Safari                 404           0.07
    Iceweasel                0           0.00
    Comodo Dragon            0           0.00
    IE10                   404           0.08
    Chrome                 200           0.02

    >>> df.reindex(new_index, fill_value='missing')
                  http_status response_time
    Safari                404          0.07
    Iceweasel         missing       missing
    Comodo Dragon     missing       missing
    IE10                  404          0.08
    Chrome                200          0.02

    We can also reindex the columns.

    >>> df.reindex(columns=['http_status', 'user_agent'])
               http_status  user_agent
    Firefox            200         NaN
    Chrome             200         NaN
    Safari             404         NaN
    IE10               404         NaN
    Konqueror          301         NaN

    Or we can use "axis-style" keyword arguments

    >>> df.reindex(['http_status', 'user_agent'], axis="columns")
               http_status  user_agent
    Firefox            200         NaN
    Chrome             200         NaN
    Safari             404         NaN
    IE10               404         NaN
    Konqueror          301         NaN

    To further illustrate the filling functionality in
    ``reindex``, we will create a dataframe with a
    monotonically increasing index (for example, a sequence
    of dates).

    >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
    >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}},
    ...                    index=date_index)
    >>> df2
                prices
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0

    Suppose we decide to expand the dataframe to cover a wider
    date range.

    >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
    >>> df2.reindex(date_index2)
                prices
    2009-12-29     NaN
    2009-12-30     NaN
    2009-12-31     NaN
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0
    2010-01-07     NaN

    The index entries that did not have a value in the original data frame
    (for example, '2009-12-29') are by default filled with ``NaN``.
    If desired, we can fill in the missing values using one of several
    options.

    For example, to back-propagate the last valid value to fill the ``NaN``
    values, pass ``bfill`` as an argument to the ``method`` keyword.

    >>> df2.reindex(date_index2, method='bfill')
                prices
    2009-12-29   100.0
    2009-12-30   100.0
    2009-12-31   100.0
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0
    2010-01-07     NaN

    Please note that the ``NaN`` value present in the original dataframe
    (at index value 2010-01-03) will not be filled by any of the
    value propagation schemes. This is because filling while reindexing
    does not look at dataframe values, but only compares the original and
    desired indexes. If you do want to fill in the ``NaN`` values present
    in the original dataframe, use the ``fillna()`` method.

    See the :ref:`user guide <basics.reindexing>` for more.
    """
    # TODO: Decide if we care about having different examples for different
    # kinds

    # construct the args
    axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
    method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
    level = kwargs.pop("level", None)
    copy = kwargs.pop("copy", True)
    limit = kwargs.pop("limit", None)
    tolerance = kwargs.pop("tolerance", None)
    fill_value = kwargs.pop("fill_value", None)

    # Series.reindex doesn't use / need the axis kwarg
    # We pop and ignore it here, to make writing Series/Frame generic code
    # easier
    kwargs.pop("axis", None)

    # Anything left in kwargs at this point was not recognized above.
    if kwargs:
        raise TypeError(
            "reindex() got an unexpected keyword "
            f'argument "{list(kwargs.keys())[0]}"'
        )

    self._consolidate_inplace()

    # if all axes that are requested to reindex are equal, then only copy
    # if indicated must have index names equal here as well as values
    if all(
        self._get_axis(axis).identical(ax)
        for axis, ax in axes.items()
        if ax is not None
    ):
        if copy:
            return self.copy()
        return self

    # check if we are a multi reindex
    if self._needs_reindex_multi(axes, method, level):
        return self._reindex_multi(axes, copy, fill_value)

    # perform the reindex on the axes
    return self._reindex_axes(
        axes, level, limit, tolerance, method, fill_value, copy
    ).__finalize__(self, method="reindex")
@final
def _reindex_axes(
    self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
    """Perform the reindex for all the axes."""
    # Apply the reindex one axis at a time, threading the intermediate
    # result through ``obj``; axes mapped to None are left untouched.
    obj = self
    for a in self._AXIS_ORDERS:
        labels = axes[a]
        if labels is None:
            continue

        ax = self._get_axis(a)
        # ``indexer`` is the positional mapping from the old axis onto
        # ``new_index`` (computed by Index.reindex).
        new_index, indexer = ax.reindex(
            labels, level=level, limit=limit, tolerance=tolerance, method=method
        )

        axis = self._get_axis_number(a)
        obj = obj._reindex_with_indexers(
            {axis: [new_index, indexer]},
            fill_value=fill_value,
            copy=copy,
            allow_dups=False,
        )

    return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
    """Check if we do need a multi reindex."""
    # The fast multi-axis path only applies when every axis is being
    # reindexed plainly: no fill method, no MultiIndex level, and a
    # homogeneous (non-mixed) dtype layout.
    if method is not None or level is not None:
        return False
    if com.count_not_none(*axes.values()) != self._AXIS_LEN:
        return False
    return not self._is_mixed_type
def _reindex_multi(self, axes, copy, fill_value):
    """Abstract hook used by ``reindex`` when ``_needs_reindex_multi`` is True."""
    raise AbstractMethodError(self)
@final
def _reindex_with_indexers(
    self: FrameOrSeries,
    reindexers,
    fill_value=None,
    copy: bool_t = False,
    allow_dups: bool_t = False,
) -> FrameOrSeries:
    """allow_dups indicates an internal call here"""
    # ``reindexers`` maps axis number -> [new_index, positional indexer];
    # each axis is applied to the block manager in sorted-axis order.
    # reindex doing multiple operations on different axes if indicated
    new_data = self._mgr
    for axis in sorted(reindexers.keys()):
        index, indexer = reindexers[axis]
        baxis = self._get_block_manager_axis(axis)

        if index is None:
            continue

        index = ensure_index(index)
        if indexer is not None:
            indexer = ensure_platform_int(indexer)

        # TODO: speed up on homogeneous DataFrame objects
        new_data = new_data.reindex_indexer(
            index,
            indexer,
            axis=baxis,
            fill_value=fill_value,
            allow_dups=allow_dups,
            copy=copy,
        )
        # If we've made a copy once, no need to make another one
        copy = False

    # Force a copy only if nothing above produced a new manager and the
    # caller explicitly asked for one.
    if copy and new_data is self._mgr:
        new_data = new_data.copy()

    return self._constructor(new_data).__finalize__(self)
def filter(
    self: FrameOrSeries,
    items=None,
    like: str | None = None,
    regex: str | None = None,
    axis=None,
) -> FrameOrSeries:
    """
    Subset the dataframe rows or columns according to the specified index labels.

    Note that this routine does not filter a dataframe on its
    contents. The filter is applied to the labels of the index.

    Parameters
    ----------
    items : list-like
        Keep labels from axis which are in items.
    like : str
        Keep labels from axis for which "like in label == True".
    regex : str (regular expression)
        Keep labels from axis for which re.search(regex, label) == True.
    axis : {0 or ‘index’, 1 or ‘columns’, None}, default None
        The axis to filter on, expressed either as an index (int)
        or axis name (str). By default this is the info axis,
        'index' for Series, 'columns' for DataFrame.

    Returns
    -------
    same type as input object

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns
        by label(s) or a boolean array.

    Notes
    -----
    The ``items``, ``like``, and ``regex`` parameters are
    enforced to be mutually exclusive.

    ``axis`` defaults to the info axis that is used when indexing
    with ``[]``.

    Examples
    --------
    >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
    ...                   index=['mouse', 'rabbit'],
    ...                   columns=['one', 'two', 'three'])
    >>> df
            one  two  three
    mouse     1    2      3
    rabbit    4    5      6

    >>> # select columns by name
    >>> df.filter(items=['one', 'three'])
            one  three
    mouse     1      3
    rabbit    4      6

    >>> # select columns by regular expression
    >>> df.filter(regex='e$', axis=1)
            one  three
    mouse     1      3
    rabbit    4      6

    >>> # select rows containing 'bbi'
    >>> df.filter(like='bbi', axis=0)
            one  two  three
    rabbit    4    5      6
    """
    nkw = com.count_not_none(items, like, regex)
    if nkw > 1:
        raise TypeError(
            "Keyword arguments `items`, `like`, or `regex` "
            "are mutually exclusive"
        )

    if axis is None:
        axis = self._info_axis_name
    labels = self._get_axis(axis)

    if items is not None:
        # Preserve the caller's ordering of ``items``; silently drop
        # labels that are not present on the axis.
        name = self._get_axis_name(axis)
        return self.reindex(**{name: [r for r in items if r in labels]})
    elif like:
        # NOTE(review): truthiness is used here, so an empty string for
        # ``like`` (or ``regex`` below) falls through to the TypeError —
        # confirm this is the intended treatment of "".

        def f(x) -> bool_t:
            assert like is not None  # needed for mypy
            return like in ensure_str(x)

        values = labels.map(f)
        return self.loc(axis=axis)[values]
    elif regex:

        def f(x) -> bool_t:
            return matcher.search(ensure_str(x)) is not None

        # Compile once and reuse inside ``f`` for every label.
        matcher = re.compile(regex)
        values = labels.map(f)
        return self.loc(axis=axis)[values]
    else:
        raise TypeError("Must pass either `items`, `like`, or `regex`")
@final
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
    """
    Return the first `n` rows.

    This function returns the first `n` rows for the object based
    on position. It is useful for quickly testing if your object
    has the right type of data in it.

    For negative values of `n`, this function returns all rows except
    the last `n` rows, equivalent to ``df[:-n]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    same type as caller
        The first `n` rows of the caller object.

    See Also
    --------
    DataFrame.tail: Returns the last `n` rows.

    Examples
    --------
    >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
    ...                    'monkey', 'parrot', 'shark', 'whale', 'zebra']})
    >>> df
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey
    5     parrot
    6      shark
    7      whale
    8      zebra

    Viewing the first 5 lines

    >>> df.head()
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey

    Viewing the first `n` lines (three in this case)

    >>> df.head(3)
          animal
    0  alligator
    1        bee
    2     falcon

    For negative values of `n`

    >>> df.head(-3)
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey
    5     parrot
    """
    # Plain positional slice: works for negative ``n`` too, since
    # ``[:n]`` with n < 0 drops the last |n| rows.
    return self.iloc[:n]
@final
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
    """
    Return the last `n` rows.

    This function returns last `n` rows from the object based on
    position. It is useful for quickly verifying data, for example,
    after sorting or appending rows.

    For negative values of `n`, this function returns all rows except
    the first `n` rows, equivalent to ``df[n:]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    type of caller
        The last `n` rows of the caller object.

    See Also
    --------
    DataFrame.head : The first `n` rows of the caller object.

    Examples
    --------
    >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
    ...                    'monkey', 'parrot', 'shark', 'whale', 'zebra']})
    >>> df
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey
    5     parrot
    6      shark
    7      whale
    8      zebra

    Viewing the last 5 lines

    >>> df.tail()
       animal
    4  monkey
    5  parrot
    6   shark
    7   whale
    8   zebra

    Viewing the last `n` lines (three in this case)

    >>> df.tail(3)
      animal
    6  shark
    7  whale
    8  zebra

    For negative values of `n`

    >>> df.tail(-3)
       animal
    3    lion
    4  monkey
    5  parrot
    6   shark
    7   whale
    8   zebra
    """
    # n == 0 must be special-cased: ``iloc[-0:]`` would select ALL rows,
    # since -0 == 0.
    if n == 0:
        return self.iloc[0:0]
    return self.iloc[-n:]
@final
def sample(
    self: FrameOrSeries,
    n: int | None = None,
    frac: float | None = None,
    replace: bool_t = False,
    weights=None,
    random_state: RandomState | None = None,
    axis: Axis | None = None,
    ignore_index: bool_t = False,
) -> FrameOrSeries:
    """
    Return a random sample of items from an axis of object.

    You can use `random_state` for reproducibility.

    Parameters
    ----------
    n : int, optional
        Number of items from axis to return. Cannot be used with `frac`.
        Default = 1 if `frac` = None.
    frac : float, optional
        Fraction of axis items to return. Cannot be used with `n`.
    replace : bool, default False
        Allow or disallow sampling of the same row more than once.
    weights : str or ndarray-like, optional
        Default 'None' results in equal probability weighting.
        If passed a Series, will align with target object on index. Index
        values in weights not found in sampled object will be ignored and
        index values in sampled object not in weights will be assigned
        weights of zero.
        If called on a DataFrame, will accept the name of a column
        when axis = 0.
        Unless weights are a Series, weights must be same length as axis
        being sampled.
        If weights do not sum to 1, they will be normalized to sum to 1.
        Missing values in the weights column will be treated as zero.
        Infinite values not allowed.
    random_state : int, array-like, BitGenerator, np.random.RandomState,
        np.random.Generator, optional. If int, array-like, or BitGenerator, seed for
        random number generator. If np.random.RandomState or np.random.Generator,
        use as given.

        .. versionchanged:: 1.1.0

            array-like and BitGenerator object now passed to np.random.RandomState()
            as seed

        .. versionchanged:: 1.4.0

            np.random.Generator objects now accepted

    axis : {0 or ‘index’, 1 or ‘columns’, None}, default None
        Axis to sample. Accepts axis number or name. Default is stat axis
        for given data type (0 for Series and DataFrames).
    ignore_index : bool, default False
        If True, the resulting index will be labeled 0, 1, …, n - 1.

        .. versionadded:: 1.3.0

    Returns
    -------
    Series or DataFrame
        A new object of same type as caller containing `n` items randomly
        sampled from the caller object.

    See Also
    --------
    DataFrameGroupBy.sample: Generates random samples from each group of a
        DataFrame object.
    SeriesGroupBy.sample: Generates random samples from each group of a
        Series object.
    numpy.random.choice: Generates a random sample from a given 1-D numpy
        array.

    Notes
    -----
    If `frac` > 1, `replacement` should be set to `True`.

    Examples
    --------
    >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
    ...                    'num_wings': [2, 0, 0, 0],
    ...                    'num_specimen_seen': [10, 2, 1, 8]},
    ...                   index=['falcon', 'dog', 'spider', 'fish'])
    >>> df
            num_legs  num_wings  num_specimen_seen
    falcon         2          2                 10
    dog            4          0                  2
    spider         8          0                  1
    fish           0          0                  8

    Extract 3 random elements from the ``Series`` ``df['num_legs']``:
    Note that we use `random_state` to ensure the reproducibility of
    the examples.

    >>> df['num_legs'].sample(n=3, random_state=1)
    fish      0
    spider    8
    falcon    2
    Name: num_legs, dtype: int64

    A random 50% sample of the ``DataFrame`` with replacement:

    >>> df.sample(frac=0.5, replace=True, random_state=1)
          num_legs  num_wings  num_specimen_seen
    dog          4          0                  2
    fish         0          0                  8

    An upsample sample of the ``DataFrame`` with replacement:
    Note that `replace` parameter has to be `True` for `frac` parameter > 1.

    >>> df.sample(frac=2, replace=True, random_state=1)
            num_legs  num_wings  num_specimen_seen
    dog            4          0                  2
    fish           0          0                  8
    falcon         2          2                 10
    falcon         2          2                 10
    fish           0          0                  8
    dog            4          0                  2
    fish           0          0                  8
    dog            4          0                  2

    Using a DataFrame column as weights. Rows with larger value in the
    `num_specimen_seen` column are more likely to be sampled.

    >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
            num_legs  num_wings  num_specimen_seen
    falcon         2          2                 10
    fish           0          0                  8
    """
    if axis is None:
        axis = self._stat_axis_number

    axis = self._get_axis_number(axis)
    obj_len = self.shape[axis]

    # Process random_state argument
    rs = com.random_state(random_state)

    # ``size`` is None only when sampling by ``frac``; derive the count
    # from the axis length in that case.
    size = sample.process_sampling_size(n, frac, replace)
    if size is None:
        assert frac is not None
        size = round(frac * obj_len)

    if weights is not None:
        # Align/normalize weights against the sampled axis.
        weights = sample.preprocess_weights(self, weights, axis)

    sampled_indices = sample.sample(obj_len, size, replace, weights, rs)
    result = self.take(sampled_indices, axis=axis)

    if ignore_index:
        result.index = ibase.default_index(len(result))

    return result
@final
@doc(klass=_shared_doc_kwargs["klass"])
def pipe(
    self,
    func: Callable[..., T] | tuple[Callable[..., T], str],
    *args,
    **kwargs,
) -> T:
    r"""
    Apply func(self, \*args, \*\*kwargs).

    Parameters
    ----------
    func : function
        Function to apply to the {klass}.
        ``args``, and ``kwargs`` are passed into ``func``.
        Alternatively a ``(callable, data_keyword)`` tuple where
        ``data_keyword`` is a string indicating the keyword of
        ``callable`` that expects the {klass}.
    args : iterable, optional
        Positional arguments passed into ``func``.
    kwargs : mapping, optional
        A dictionary of keyword arguments passed into ``func``.

    Returns
    -------
    object : the return type of ``func``.

    See Also
    --------
    DataFrame.apply : Apply a function along input axis of DataFrame.
    DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
    Series.map : Apply a mapping correspondence on a
        :class:`~pandas.Series`.

    Notes
    -----
    Use ``.pipe`` when chaining together functions that expect
    Series, DataFrames or GroupBy objects. Instead of writing

    >>> func(g(h(df), arg1=a), arg2=b, arg3=c)  # doctest: +SKIP

    You can write

    >>> (df.pipe(h)
    ...    .pipe(g, arg1=a)
    ...    .pipe(func, arg2=b, arg3=c)
    ... )  # doctest: +SKIP

    If you have a function that takes the data as (say) the second
    argument, pass a tuple indicating which keyword expects the
    data. For example, suppose ``f`` takes its data as ``arg2``:

    >>> (df.pipe(h)
    ...    .pipe(g, arg1=a)
    ...    .pipe((func, 'arg2'), arg1=a, arg3=c)
    ...  )  # doctest: +SKIP
    """
    # All dispatch logic (including the tuple form) lives in com.pipe.
    return com.pipe(self, func, *args, **kwargs)
# ----------------------------------------------------------------------
# Attribute access
@final
def __finalize__(
    self: FrameOrSeries, other, method: str | None = None, **kwargs
) -> FrameOrSeries:
    """
    Propagate metadata from other to self.

    Parameters
    ----------
    other : the object from which to get the attributes that we are going
        to propagate
    method : str, optional
        A passed method name providing context on where ``__finalize__``
        was called.

        .. warning::

           The value passed as `method` are not currently considered
           stable across pandas releases.
    """
    if isinstance(other, NDFrame):
        # Copy over user-attached attrs and the duplicate-labels flag.
        for name in other.attrs:
            self.attrs[name] = other.attrs[name]

        self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels
        # For subclasses using _metadata.
        for name in set(self._metadata) & set(other._metadata):
            assert isinstance(name, str)
            object.__setattr__(self, name, getattr(other, name, None))

    if method == "concat":
        # Here ``other`` is the concat operation object exposing ``.objs``;
        # the result only allows duplicate labels if every input did.
        allows_duplicate_labels = all(
            x.flags.allows_duplicate_labels for x in other.objs
        )
        self.flags.allows_duplicate_labels = allows_duplicate_labels

    return self
def __getattr__(self, name: str):
    """
    After regular attribute access, try looking up the name

    This allows simpler access to columns for interactive use.
    """
    # Note: obj.x will always call obj.__getattribute__('x') prior to
    # calling obj.__getattr__('x').
    # Only fall back to column access when ``name`` cannot be an internal
    # attribute, metadata field, or registered accessor, and the info axis
    # actually holds it as an identifier-like label.
    if (
        name not in self._internal_names_set
        and name not in self._metadata
        and name not in self._accessors
        and self._info_axis._can_hold_identifiers_and_holds_name(name)
    ):
        return self[name]
    # Otherwise raise the ordinary AttributeError via object lookup.
    return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
    """
    After regular attribute access, try setting the name

    This allows simpler access to columns for interactive use.
    """
    # first try regular attribute access via __getattribute__, so that
    # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
    # the same attribute.
    try:
        object.__getattribute__(self, name)
        return object.__setattr__(self, name, value)
    except AttributeError:
        pass

    # if this fails, go on to more involved attribute setting
    # (note that this matches __getattr__, above).
    if name in self._internal_names_set:
        object.__setattr__(self, name, value)
    elif name in self._metadata:
        object.__setattr__(self, name, value)
    else:
        try:
            existing = getattr(self, name)
            if isinstance(existing, Index):
                # Axis attributes (e.g. ``index``/``columns``) are set as
                # real attributes, never as column assignment.
                object.__setattr__(self, name, value)
            elif name in self._info_axis:
                # Name matches an existing column/label: treat the
                # assignment as item assignment (df.col = ...).
                self[name] = value
            else:
                object.__setattr__(self, name, value)
        except (AttributeError, TypeError):
            # New attribute that looks like an attempted column creation:
            # warn, but still set it as a plain attribute.
            if isinstance(self, ABCDataFrame) and (is_list_like(value)):
                warnings.warn(
                    "Pandas doesn't allow columns to be "
                    "created via a new attribute name - see "
                    "https://pandas.pydata.org/pandas-docs/"
                    "stable/indexing.html#attribute-access",
                    stacklevel=2,
                )
            object.__setattr__(self, name, value)
@final
def _dir_additions(self) -> set[str]:
    """
    add the string-like attributes from the info_axis.
    If info_axis is a MultiIndex, its first level values are used.
    """
    additions = super()._dir_additions()
    # Only string-capable axes contribute labels to tab-completion.
    if self._info_axis._can_hold_strings:
        additions.update(self._info_axis._dir_additions_for_owner)
    return additions
# ----------------------------------------------------------------------
# Consolidation of internals
@final
def _protect_consolidate(self, f):
    """
    Consolidate _mgr -- if the blocks have changed, then clear the
    cache
    """
    # Array managers have no blocks to consolidate; run ``f`` directly.
    if isinstance(self._mgr, (ArrayManager, SingleArrayManager)):
        return f()
    blocks_before = len(self._mgr.blocks)
    result = f()
    # A changed block count means the layout moved; cached items would
    # reference stale blocks, so drop them.
    if len(self._mgr.blocks) != blocks_before:
        self._clear_item_cache()
    return result
@final
def _consolidate_inplace(self) -> None:
    """Consolidate data in place and return None"""

    def f():
        # Rebind the manager to its consolidated form.
        self._mgr = self._mgr.consolidate()

    self._protect_consolidate(f)
@final
def _consolidate(self):
    """
    Compute NDFrame with "consolidated" internals (data of each dtype
    grouped together in a single ndarray).

    Returns
    -------
    consolidated : same type as caller
    """

    # Use a local ``def`` (not a lambda bound to a name, PEP 8 E731),
    # matching the style of the sibling ``_consolidate_inplace``.
    def f():
        return self._mgr.consolidate()

    # Run under _protect_consolidate so the item cache is cleared if the
    # underlying block layout changes.
    cons_data = self._protect_consolidate(f)
    return self._constructor(cons_data).__finalize__(self)
@final
@property
def _is_mixed_type(self) -> bool_t:
    # True when the object holds more than one dtype (or any extension
    # dtype, which can never be consolidated into a single block).
    if self._mgr.is_single_block:
        return False

    if self._mgr.any_extension_types:
        # Even if they have the same dtype, we can't consolidate them,
        # so we pretend this is "mixed'"
        return True

    return self.dtypes.nunique() > 1
@final
def _check_inplace_setting(self, value) -> bool_t:
    """check whether we allow in-place setting with this type of value"""
    if self._is_mixed_type and not self._mgr.is_numeric_mixed_type:

        # allow an actual np.nan thru
        if is_float(value) and np.isnan(value):
            return True

        # Any other scalar on non-numeric mixed dtypes would require a
        # dtype change, which cannot be done in place.
        raise TypeError(
            "Cannot do inplace boolean setting on "
            "mixed-types with a non np.nan value"
        )

    return True
@final
def _get_numeric_data(self):
    """Return a new object holding only the numeric-dtype columns/blocks."""
    return self._constructor(self._mgr.get_numeric_data()).__finalize__(self)
@final
def _get_bool_data(self):
    """Return a new object holding only the boolean-dtype columns/blocks."""
    return self._constructor(self._mgr.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
    """Abstract: subclasses provide the ndarray view/copy of the data."""
    raise AbstractMethodError(self)
@property
def _values(self) -> np.ndarray:
    """internal implementation"""
    raise AbstractMethodError(self)
@property
def dtypes(self):
    """
    Return the dtypes in the DataFrame.

    This returns a Series with the data type of each column.
    The result's index is the original DataFrame's columns. Columns
    with mixed types are stored with the ``object`` dtype. See
    :ref:`the User Guide <basics.dtypes>` for more.

    Returns
    -------
    pandas.Series
        The data type of each column.

    Examples
    --------
    >>> df = pd.DataFrame({'float': [1.0],
    ...                    'int': [1],
    ...                    'datetime': [pd.Timestamp('20180310')],
    ...                    'string': ['foo']})
    >>> df.dtypes
    float              float64
    int                  int64
    datetime    datetime64[ns]
    string              object
    dtype: object
    """
    data = self._mgr.get_dtypes()
    # Always an object-dtype Series indexed by the info axis (columns for
    # a DataFrame), since dtype objects themselves are stored.
    return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_)
    def astype(
        self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
    ) -> FrameOrSeries:
        """
        Cast a pandas object to a specified dtype ``dtype``.
        Parameters
        ----------
        dtype : data type, or dict of column name -> data type
            Use a numpy.dtype or Python type to cast entire pandas object to
            the same type. Alternatively, use {col: dtype, ...}, where col is a
            column label and dtype is a numpy.dtype or Python type to cast one
            or more of the DataFrame's columns to column-specific types.
        copy : bool, default True
            Return a copy when ``copy=True`` (be very careful setting
            ``copy=False`` as changes to values then may propagate to other
            pandas objects).
        errors : {'raise', 'ignore'}, default 'raise'
            Control raising of exceptions on invalid data for provided dtype.
            - ``raise`` : allow exceptions to be raised
            - ``ignore`` : suppress exceptions. On error return original object.
        Returns
        -------
        casted : same type as caller
        See Also
        --------
        to_datetime : Convert argument to datetime.
        to_timedelta : Convert argument to timedelta.
        to_numeric : Convert argument to a numeric type.
        numpy.ndarray.astype : Cast a numpy array to a specified type.
        Notes
        -----
        .. deprecated:: 1.3.0
            Using ``astype`` to convert from timezone-naive dtype to
            timezone-aware dtype is deprecated and will raise in a
            future version.  Use :meth:`Series.dt.tz_localize` instead.
        Examples
        --------
        Create a DataFrame:
        >>> d = {'col1': [1, 2], 'col2': [3, 4]}
        >>> df = pd.DataFrame(data=d)
        >>> df.dtypes
        col1    int64
        col2    int64
        dtype: object
        Cast all columns to int32:
        >>> df.astype('int32').dtypes
        col1    int32
        col2    int32
        dtype: object
        Cast col1 to int32 using a dictionary:
        >>> df.astype({'col1': 'int32'}).dtypes
        col1    int32
        col2    int64
        dtype: object
        Create a series:
        >>> ser = pd.Series([1, 2], dtype='int32')
        >>> ser
        0    1
        1    2
        dtype: int32
        >>> ser.astype('int64')
        0    1
        1    2
        dtype: int64
        Convert to categorical type:
        >>> ser.astype('category')
        0    1
        1    2
        dtype: category
        Categories (2, int64): [1, 2]
        Convert to ordered categorical type with custom ordering:
        >>> from pandas.api.types import CategoricalDtype
        >>> cat_dtype = CategoricalDtype(
        ...     categories=[2, 1], ordered=True)
        >>> ser.astype(cat_dtype)
        0    1
        1    2
        dtype: category
        Categories (2, int64): [2 < 1]
        Note that using ``copy=False`` and changing data on a new
        pandas object may propagate changes:
        >>> s1 = pd.Series([1, 2])
        >>> s2 = s1.astype('int64', copy=False)
        >>> s2[0] = 10
        >>> s1  # note that s1[0] has changed too
        0    10
        1     2
        dtype: int64
        Create a series of dates:
        >>> ser_date = pd.Series(pd.date_range('20200101', periods=3))
        >>> ser_date
        0   2020-01-01
        1   2020-01-02
        2   2020-01-03
        dtype: datetime64[ns]
        """
        if is_dict_like(dtype):
            # dtype given as a mapping of label -> dtype
            if self.ndim == 1:  # i.e. Series
                # A Series mapping may only key on the Series' own name.
                if len(dtype) > 1 or self.name not in dtype:
                    raise KeyError(
                        "Only the Series name can be used for "
                        "the key in Series dtype mappings."
                    )
                new_type = dtype[self.name]
                return self.astype(new_type, copy, errors)
            # Validate every key refers to an existing column before casting.
            for col_name in dtype.keys():
                if col_name not in self:
                    raise KeyError(
                        "Only a column name can be used for the "
                        "key in a dtype mappings argument."
                    )
            # Cast column-by-column; untouched columns are (optionally) copied.
            results = []
            for col_name, col in self.items():
                if col_name in dtype:
                    results.append(
                        col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
                    )
                else:
                    results.append(col.copy() if copy else col)
        elif is_extension_array_dtype(dtype) and self.ndim > 1:
            # GH 18099/22869: columnwise conversion to extension dtype
            # GH 24704: use iloc to handle duplicate column names
            # TODO(EA2D): special case not needed with 2D EAs
            results = [
                self.iloc[:, i].astype(dtype, copy=copy)
                for i in range(len(self.columns))
            ]
        else:
            # else, only a single dtype is given
            new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors)
            return self._constructor(new_data).__finalize__(self, method="astype")
        # GH 33113: handle empty frame or series
        if not results:
            return self.copy()
        # GH 19920: retain column metadata after concat
        result = concat(results, axis=1, copy=False)
        result.columns = self.columns
        return result
    @final
    def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
        """
        Make a copy of this object's indices and data.
        When ``deep=True`` (default), a new object will be created with a
        copy of the calling object's data and indices. Modifications to
        the data or indices of the copy will not be reflected in the
        original object (see notes below).
        When ``deep=False``, a new object will be created without copying
        the calling object's data or index (only references to the data
        and index are copied). Any changes to the data of the original
        will be reflected in the shallow copy (and vice versa).
        Parameters
        ----------
        deep : bool, default True
            Make a deep copy, including a copy of the data and the indices.
            With ``deep=False`` neither the indices nor the data are copied.
        Returns
        -------
        copy : Series or DataFrame
            Object type matches caller.
        Notes
        -----
        When ``deep=True``, data is copied but actual Python objects
        will not be copied recursively, only the reference to the object.
        This is in contrast to `copy.deepcopy` in the Standard Library,
        which recursively copies object data (see examples below).
        While ``Index`` objects are copied when ``deep=True``, the underlying
        numpy array is not copied for performance reasons. Since ``Index`` is
        immutable, the underlying data can be safely shared and a copy
        is not needed.
        Examples
        --------
        >>> s = pd.Series([1, 2], index=["a", "b"])
        >>> s
        a    1
        b    2
        dtype: int64
        >>> s_copy = s.copy()
        >>> s_copy
        a    1
        b    2
        dtype: int64
        **Shallow copy versus default (deep) copy:**
        >>> s = pd.Series([1, 2], index=["a", "b"])
        >>> deep = s.copy()
        >>> shallow = s.copy(deep=False)
        Shallow copy shares data and index with original.
        >>> s is shallow
        False
        >>> s.values is shallow.values and s.index is shallow.index
        True
        Deep copy has own copy of data and index.
        >>> s is deep
        False
        >>> s.values is deep.values or s.index is deep.index
        False
        Updates to the data shared by shallow copy and original is reflected
        in both; deep copy remains unchanged.
        >>> s[0] = 3
        >>> shallow[1] = 4
        >>> s
        a    3
        b    4
        dtype: int64
        >>> shallow
        a    3
        b    4
        dtype: int64
        >>> deep
        a    1
        b    2
        dtype: int64
        Note that when copying an object containing Python objects, a deep copy
        will copy the data, but will not do so recursively. Updating a nested
        data object will be reflected in the deep copy.
        >>> s = pd.Series([[1, 2], [3, 4]])
        >>> deep = s.copy()
        >>> s[0][0] = 10
        >>> s
        0    [10, 2]
        1     [3, 4]
        dtype: object
        >>> deep
        0    [10, 2]
        1     [3, 4]
        dtype: object
        """
        # Copy at the manager level; ``deep`` is forwarded unchanged.
        data = self._mgr.copy(deep=deep)
        # The item cache may hold views into the old data — drop it.
        self._clear_item_cache()
        return self._constructor(data).__finalize__(self, method="copy")
    @final
    def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
        # Support for ``copy.copy``; delegates to :meth:`copy`.
        return self.copy(deep=deep)
    @final
    def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
        """
        Support for ``copy.deepcopy``; delegates to :meth:`copy` with
        ``deep=True``.

        Parameters
        ----------
        memo, default None
            Standard signature. Unused
        """
        return self.copy(deep=True)
@final
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
return self._constructor(
self._mgr.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
copy=True,
)
).__finalize__(self)
    @final
    def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
        """
        Attempt to infer better dtypes for object columns.
        Attempts soft conversion of object-dtyped
        columns, leaving non-object and unconvertible
        columns unchanged. The inference rules are the
        same as during normal Series/DataFrame construction.
        Returns
        -------
        converted : same type as input object
        See Also
        --------
        to_datetime : Convert argument to datetime.
        to_timedelta : Convert argument to timedelta.
        to_numeric : Convert argument to numeric type.
        convert_dtypes : Convert argument to best possible dtype.
        Examples
        --------
        >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
        >>> df = df.iloc[1:]
        >>> df
           A
        1  1
        2  2
        3  3
        >>> df.dtypes
        A    object
        dtype: object
        >>> df.infer_objects().dtypes
        A    int64
        dtype: object
        """
        # numeric=False necessary to only soft convert;
        # python objects will still be converted to
        # native numpy numeric types
        return self._constructor(
            self._mgr.convert(datetime=True, numeric=False, timedelta=True, copy=True)
        ).__finalize__(self, method="infer_objects")
    @final
    def convert_dtypes(
        self: FrameOrSeries,
        infer_objects: bool_t = True,
        convert_string: bool_t = True,
        convert_integer: bool_t = True,
        convert_boolean: bool_t = True,
        convert_floating: bool_t = True,
    ) -> FrameOrSeries:
        """
        Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
        .. versionadded:: 1.0.0
        Parameters
        ----------
        infer_objects : bool, default True
            Whether object dtypes should be converted to the best possible types.
        convert_string : bool, default True
            Whether object dtypes should be converted to ``StringDtype()``.
        convert_integer : bool, default True
            Whether, if possible, conversion can be done to integer extension types.
        convert_boolean : bool, default True
            Whether object dtypes should be converted to ``BooleanDtypes()``.
        convert_floating : bool, default True
            Whether, if possible, conversion can be done to floating extension types.
            If `convert_integer` is also True, preference will be give to integer
            dtypes if the floats can be faithfully casted to integers.
            .. versionadded:: 1.2.0
        Returns
        -------
        Series or DataFrame
            Copy of input object with new dtype.
        See Also
        --------
        infer_objects : Infer dtypes of objects.
        to_datetime : Convert argument to datetime.
        to_timedelta : Convert argument to timedelta.
        to_numeric : Convert argument to a numeric type.
        Notes
        -----
        By default, ``convert_dtypes`` will attempt to convert a Series (or each
        Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
        ``convert_string``, ``convert_integer``, ``convert_boolean`` and
        ``convert_floating``, it is possible to turn off individual conversions
        to ``StringDtype``, the integer extension types, ``BooleanDtype``
        or floating extension types, respectively.
        For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
        rules as during normal Series/DataFrame construction.  Then, if possible,
        convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer
        or floating extension type, otherwise leave as ``object``.
        If the dtype is integer, convert to an appropriate integer extension type.
        If the dtype is numeric, and consists of all integers, convert to an
        appropriate integer extension type. Otherwise, convert to an
        appropriate floating extension type.
        .. versionchanged:: 1.2
            Starting with pandas 1.2, this method also converts float columns
            to the nullable floating extension type.
        In the future, as new dtypes are added that support ``pd.NA``, the results
        of this method will change to support those new dtypes.
        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {
        ...         "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
        ...         "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
        ...         "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
        ...         "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
        ...         "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
        ...         "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
        ...     }
        ... )
        Start with a DataFrame with default dtypes.
        >>> df
           a  b      c    d     e      f
        0  1  x   True    h  10.0    NaN
        1  2  y  False    i   NaN  100.5
        2  3  z    NaN  NaN  20.0  200.0
        >>> df.dtypes
        a      int32
        b     object
        c     object
        d     object
        e    float64
        f    float64
        dtype: object
        Convert the DataFrame to use best possible dtypes.
        >>> dfn = df.convert_dtypes()
        >>> dfn
           a  b      c     d     e      f
        0  1  x   True     h    10   <NA>
        1  2  y  False     i  <NA>  100.5
        2  3  z   <NA>  <NA>    20  200.0
        >>> dfn.dtypes
        a      Int32
        b     string
        c    boolean
        d     string
        e      Int64
        f    Float64
        dtype: object
        Start with a Series of strings and missing data represented by ``np.nan``.
        >>> s = pd.Series(["a", "b", np.nan])
        >>> s
        0      a
        1      b
        2    NaN
        dtype: object
        Obtain a Series with dtype ``StringDtype``.
        >>> s.convert_dtypes()
        0       a
        1       b
        2    <NA>
        dtype: string
        """
        if self.ndim == 1:
            # Series: a single conversion is enough.
            return self._convert_dtypes(
                infer_objects,
                convert_string,
                convert_integer,
                convert_boolean,
                convert_floating,
            )
        else:
            # DataFrame: convert column-by-column, then reassemble.
            results = [
                col._convert_dtypes(
                    infer_objects,
                    convert_string,
                    convert_integer,
                    convert_boolean,
                    convert_floating,
                )
                for col_name, col in self.items()
            ]
            if len(results) > 0:
                return concat(results, axis=1, copy=False)
            else:
                # Empty frame: nothing to convert, return a copy.
                return self.copy()
# ----------------------------------------------------------------------
# Filling NA's
    @doc(**_shared_doc_kwargs)
    def fillna(
        self: FrameOrSeries,
        value=None,
        method=None,
        axis=None,
        inplace: bool_t = False,
        limit=None,
        downcast=None,
    ) -> FrameOrSeries | None:
        """
        Fill NA/NaN values using the specified method.
        Parameters
        ----------
        value : scalar, dict, Series, or DataFrame
            Value to use to fill holes (e.g. 0), alternately a
            dict/Series/DataFrame of values specifying which value to use for
            each index (for a Series) or column (for a DataFrame).  Values not
            in the dict/Series/DataFrame will not be filled. This value cannot
            be a list.
        method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
            Method to use for filling holes in reindexed Series
            pad / ffill: propagate last valid observation forward to next valid
            backfill / bfill: use next valid observation to fill gap.
        axis : {axes_single_arg}
            Axis along which to fill missing values.
        inplace : bool, default False
            If True, fill in-place. Note: this will modify any
            other views on this object (e.g., a no-copy slice for a column in a
            DataFrame).
        limit : int, default None
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill. In other words, if there is
            a gap with more than this number of consecutive NaNs, it will only
            be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled. Must be greater than 0 if not None.
        downcast : dict, default is None
            A dict of item->dtype of what to downcast if possible,
            or the string 'infer' which will try to downcast to an appropriate
            equal type (e.g. float64 to int64 if possible).
        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        See Also
        --------
        interpolate : Fill NaN values using interpolation.
        reindex : Conform object to new index.
        asfreq : Convert TimeSeries to specified frequency.
        Examples
        --------
        >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
        ...                    [3, 4, np.nan, 1],
        ...                    [np.nan, np.nan, np.nan, 5],
        ...                    [np.nan, 3, np.nan, 4]],
        ...                   columns=list("ABCD"))
        >>> df
             A    B   C  D
        0  NaN  2.0 NaN  0
        1  3.0  4.0 NaN  1
        2  NaN  NaN NaN  5
        3  NaN  3.0 NaN  4
        Replace all NaN elements with 0s.
        >>> df.fillna(0)
            A   B   C   D
        0   0.0 2.0 0.0 0
        1   3.0 4.0 0.0 1
        2   0.0 0.0 0.0 5
        3   0.0 3.0 0.0 4
        We can also propagate non-null values forward or backward.
        >>> df.fillna(method="ffill")
            A   B   C   D
        0   NaN 2.0 NaN 0
        1   3.0 4.0 NaN 1
        2   3.0 4.0 NaN 5
        3   3.0 3.0 NaN 4
        Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
        2, and 3 respectively.
        >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}}
        >>> df.fillna(value=values)
            A   B   C   D
        0   0.0 2.0 2.0 0
        1   3.0 4.0 2.0 1
        2   0.0 1.0 2.0 5
        3   0.0 3.0 2.0 4
        Only replace the first NaN element.
        >>> df.fillna(value=values, limit=1)
            A   B   C   D
        0   0.0 2.0 2.0 0
        1   3.0 4.0 NaN 1
        2   NaN 1.0 NaN 5
        3   NaN 3.0 NaN 4
        When filling using a DataFrame, replacement happens along
        the same column names and same indices
        >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE"))
        >>> df.fillna(df2)
            A   B   C   D
        0   0.0 2.0 0.0 0
        1   3.0 4.0 0.0 1
        2   0.0 0.0 0.0 5
        3   0.0 3.0 0.0 4
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        value, method = validate_fillna_kwargs(value, method)
        self._consolidate_inplace()
        # set the default here, so functions examining the signature
        # can detect if something was set (e.g. in groupby) (GH9221)
        if axis is None:
            axis = 0
        axis = self._get_axis_number(axis)
        if value is None:
            # method-based fill (ffill/bfill)
            if not self._mgr.is_single_block and axis == 1:
                if inplace:
                    raise NotImplementedError()
                # Fill along axis=1 by transposing, filling, transposing back.
                result = self.T.fillna(method=method, limit=limit).T
                # need to downcast here because of all of the transposes
                result._mgr = result._mgr.downcast()
                return result
            new_data = self._mgr.interpolate(
                method=method,
                axis=axis,
                limit=limit,
                inplace=inplace,
                coerce=True,
                downcast=downcast,
            )
        else:
            if self.ndim == 1:
                if isinstance(value, (dict, ABCSeries)):
                    # Align the mapping to our index before filling.
                    value = create_series_with_explicit_dtype(
                        value, dtype_if_empty=object
                    )
                    value = value.reindex(self.index, copy=False)
                    value = value._values
                elif not is_list_like(value):
                    pass
                else:
                    raise TypeError(
                        '"value" parameter must be a scalar, dict '
                        "or Series, but you passed a "
                        f'"{type(value).__name__}"'
                    )
                new_data = self._mgr.fillna(
                    value=value, limit=limit, inplace=inplace, downcast=downcast
                )
            elif isinstance(value, (dict, ABCSeries)):
                if axis == 1:
                    raise NotImplementedError(
                        "Currently only can fill "
                        "with dict/Series column "
                        "by column"
                    )
                # Fill each named column in place on a (possibly copied) result.
                result = self if inplace else self.copy()
                is_dict = isinstance(downcast, dict)
                for k, v in value.items():
                    if k not in result:
                        continue
                    obj = result[k]
                    downcast_k = downcast if not is_dict else downcast.get(k)
                    obj.fillna(v, limit=limit, inplace=True, downcast=downcast_k)
                return result if not inplace else None
            elif not is_list_like(value):
                new_data = self._mgr.fillna(
                    value=value, limit=limit, inplace=inplace, downcast=downcast
                )
            elif isinstance(value, ABCDataFrame) and self.ndim == 2:
                # NOTE(review): ``._data`` is a deprecated alias of ``._mgr``
                # on NDFrame — consider migrating to ``._mgr``.
                new_data = self.where(self.notna(), value)._data
            else:
                raise ValueError(f"invalid fill value with a {type(value)}")
        result = self._constructor(new_data)
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="fillna")
    @doc(klass=_shared_doc_kwargs["klass"])
    def ffill(
        self: FrameOrSeries,
        axis: None | Axis = None,
        inplace: bool_t = False,
        limit: None | int = None,
        downcast=None,
    ) -> FrameOrSeries | None:
        """
        Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        return self.fillna(
            method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
        )
    # Backwards-compatible alias: ``pad`` is ``ffill``.
    pad = ffill
    @doc(klass=_shared_doc_kwargs["klass"])
    def bfill(
        self: FrameOrSeries,
        axis: None | Axis = None,
        inplace: bool_t = False,
        limit: None | int = None,
        downcast=None,
    ) -> FrameOrSeries | None:
        """
        Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        return self.fillna(
            method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
        )
    # Backwards-compatible alias: ``backfill`` is ``bfill``.
    backfill = bfill
    @doc(
        _shared_docs["replace"],
        klass=_shared_doc_kwargs["klass"],
        inplace=_shared_doc_kwargs["inplace"],
        replace_iloc=_shared_doc_kwargs["replace_iloc"],
    )
    def replace(
        self,
        to_replace=None,
        value=None,
        inplace: bool_t = False,
        limit: int | None = None,
        regex=False,
        method="pad",
    ):
        # Shared docstring injected by the ``@doc`` decorator above.
        if not (
            is_scalar(to_replace)
            or is_re_compilable(to_replace)
            or is_list_like(to_replace)
        ):
            raise TypeError(
                "Expecting 'to_replace' to be either a scalar, array-like, "
                "dict or None, got invalid type "
                f"{repr(type(to_replace).__name__)}"
            )
        inplace = validate_bool_kwarg(inplace, "inplace")
        if not is_bool(regex) and to_replace is not None:
            raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool")
        self._consolidate_inplace()
        if value is None:
            # passing a single value that is scalar like
            # when value is None (GH5319), for compat
            if not is_dict_like(to_replace) and not is_dict_like(regex):
                to_replace = [to_replace]
            if isinstance(to_replace, (tuple, list)):
                # list of targets with no replacement value: use the fill method
                if isinstance(self, ABCDataFrame):
                    return self.apply(
                        self._constructor_sliced._replace_single,
                        args=(to_replace, method, inplace, limit),
                    )
                self = cast("Series", self)
                return self._replace_single(to_replace, method, inplace, limit)
            if not is_dict_like(to_replace):
                if not is_dict_like(regex):
                    raise TypeError(
                        'If "to_replace" and "value" are both None '
                        'and "to_replace" is not a list, then '
                        "regex must be a mapping"
                    )
                # regex carries the mapping: treat it as to_replace
                to_replace = regex
                regex = True
            items = list(to_replace.items())
            if items:
                keys, values = zip(*items)
            else:
                keys, values = ([], [])
            are_mappings = [is_dict_like(v) for v in values]
            if any(are_mappings):
                if not all(are_mappings):
                    raise TypeError(
                        "If a nested mapping is passed, all values "
                        "of the top level mapping must be mappings"
                    )
                # passed a nested dict/Series
                to_rep_dict = {}
                value_dict = {}
                for k, v in items:
                    keys, values = list(zip(*v.items())) or ([], [])
                    to_rep_dict[k] = list(keys)
                    value_dict[k] = list(values)
                to_replace, value = to_rep_dict, value_dict
            else:
                to_replace, value = keys, values
            # Re-dispatch with the normalized to_replace/value pair.
            return self.replace(
                to_replace, value, inplace=inplace, limit=limit, regex=regex
            )
        else:
            # need a non-zero len on all axes
            if not self.size:
                if inplace:
                    return
                return self.copy()
            if is_dict_like(to_replace):
                if is_dict_like(value):  # {'A' : NA} -> {'A' : 0}
                    # Note: Checking below for `in foo.keys()` instead of
                    # `in foo` is needed for when we have a Series and not dict
                    mapping = {
                        col: (to_replace[col], value[col])
                        for col in to_replace.keys()
                        if col in value.keys() and col in self
                    }
                    return self._replace_columnwise(mapping, inplace, regex)
                # {'A': NA} -> 0
                elif not is_list_like(value):
                    # Operate column-wise
                    if self.ndim == 1:
                        raise ValueError(
                            "Series.replace cannot use dict-like to_replace "
                            "and non-None value"
                        )
                    mapping = {
                        col: (to_rep, value) for col, to_rep in to_replace.items()
                    }
                    return self._replace_columnwise(mapping, inplace, regex)
                else:
                    raise TypeError("value argument must be scalar, dict, or Series")
            elif is_list_like(to_replace):
                if not is_list_like(value):
                    # e.g. to_replace = [NA, ''] and value is 0,
                    # so we replace NA with 0 and then replace '' with 0
                    value = [value] * len(to_replace)
                # e.g. we have to_replace = [NA, ''] and value = [0, 'missing']
                if len(to_replace) != len(value):
                    raise ValueError(
                        f"Replacement lists must match in length. "
                        f"Expecting {len(to_replace)} got {len(value)} "
                    )
                new_data = self._mgr.replace_list(
                    src_list=to_replace,
                    dest_list=value,
                    inplace=inplace,
                    regex=regex,
                )
            elif to_replace is None:
                if not (
                    is_re_compilable(regex)
                    or is_list_like(regex)
                    or is_dict_like(regex)
                ):
                    raise TypeError(
                        f"'regex' must be a string or a compiled regular expression "
                        f"or a list or dict of strings or regular expressions, "
                        f"you passed a {repr(type(regex).__name__)}"
                    )
                # regex carries the targets: recurse with regex=True
                return self.replace(
                    regex, value, inplace=inplace, limit=limit, regex=True
                )
            else:
                # dest iterable dict-like
                if is_dict_like(value):  # NA -> {'A' : 0, 'B' : -1}
                    # Operate column-wise
                    if self.ndim == 1:
                        raise ValueError(
                            "Series.replace cannot use dict-value and "
                            "non-None to_replace"
                        )
                    mapping = {col: (to_replace, val) for col, val in value.items()}
                    return self._replace_columnwise(mapping, inplace, regex)
                elif not is_list_like(value):  # NA -> 0
                    new_data = self._mgr.replace(
                        to_replace=to_replace, value=value, inplace=inplace, regex=regex
                    )
                else:
                    raise TypeError(
                        f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
                    )
        result = self._constructor(new_data)
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="replace")
    def interpolate(
        self: FrameOrSeries,
        method: str = "linear",
        axis: Axis = 0,
        limit: int | None = None,
        inplace: bool_t = False,
        limit_direction: str | None = None,
        limit_area: str | None = None,
        downcast: str | None = None,
        **kwargs,
    ) -> FrameOrSeries | None:
        """
        Fill NaN values using an interpolation method.
        Please note that only ``method='linear'`` is supported for
        DataFrame/Series with a MultiIndex.
        Parameters
        ----------
        method : str, default 'linear'
            Interpolation technique to use. One of:
            * 'linear': Ignore the index and treat the values as equally
              spaced. This is the only method supported on MultiIndexes.
            * 'time': Works on daily and higher resolution data to interpolate
              given length of interval.
            * 'index', 'values': use the actual numerical values of the index.
            * 'pad': Fill in NaNs using existing values.
            * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
              'barycentric', 'polynomial': Passed to
              `scipy.interpolate.interp1d`. These methods use the numerical
              values of the index.  Both 'polynomial' and 'spline' require that
              you also specify an `order` (int), e.g.
              ``df.interpolate(method='polynomial', order=5)``.
            * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
              'cubicspline': Wrappers around the SciPy interpolation methods of
              similar names. See `Notes`.
            * 'from_derivatives': Refers to
              `scipy.interpolate.BPoly.from_derivatives` which
              replaces 'piecewise_polynomial' interpolation method in
              scipy 0.18.
        axis : {{0 or 'index', 1 or 'columns', None}}, default None
            Axis to interpolate along.
        limit : int, optional
            Maximum number of consecutive NaNs to fill. Must be greater than
            0.
        inplace : bool, default False
            Update the data in place if possible.
        limit_direction : {{'forward', 'backward', 'both'}}, Optional
            Consecutive NaNs will be filled in this direction.
            If limit is specified:
            * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.
            * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
              'backwards'.
            If 'limit' is not specified:
            * If 'method' is 'backfill' or 'bfill', the default is 'backward'
            * else the default is 'forward'
            .. versionchanged:: 1.1.0
                raises ValueError if `limit_direction` is 'forward' or 'both' and
                method is 'backfill' or 'bfill'.
                raises ValueError if `limit_direction` is 'backward' or 'both' and
                method is 'pad' or 'ffill'.
        limit_area : {{`None`, 'inside', 'outside'}}, default None
            If limit is specified, consecutive NaNs will be filled with this
            restriction.
            * ``None``: No fill restriction.
            * 'inside': Only fill NaNs surrounded by valid values
              (interpolate).
            * 'outside': Only fill NaNs outside valid values (extrapolate).
        downcast : optional, 'infer' or None, defaults to None
            Downcast dtypes if possible.
        ``**kwargs`` : optional
            Keyword arguments to pass on to the interpolating function.
        Returns
        -------
        Series or DataFrame or None
            Returns the same object type as the caller, interpolated at
            some or all ``NaN`` values or None if ``inplace=True``.
        See Also
        --------
        fillna : Fill missing values using different methods.
        scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
            (Akima interpolator).
        scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
            Bernstein basis.
        scipy.interpolate.interp1d : Interpolate a 1-D function.
        scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
            interpolator).
        scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
            interpolation.
        scipy.interpolate.CubicSpline : Cubic spline data interpolator.
        Notes
        -----
        The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
        methods are wrappers around the respective SciPy implementations of
        similar names. These use the actual numerical values of the index.
        For more information on their behavior, see the
        `SciPy documentation
        <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
        and `SciPy tutorial
        <https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
        Examples
        --------
        Filling in ``NaN`` in a :class:`~pandas.Series` via linear
        interpolation.
        >>> s = pd.Series([0, 1, np.nan, 3])
        >>> s
        0    0.0
        1    1.0
        2    NaN
        3    3.0
        dtype: float64
        >>> s.interpolate()
        0    0.0
        1    1.0
        2    2.0
        3    3.0
        dtype: float64
        Filling in ``NaN`` in a Series by padding, but filling at most two
        consecutive ``NaN`` at a time.
        >>> s = pd.Series([np.nan, "single_one", np.nan,
        ...                "fill_two_more", np.nan, np.nan, np.nan,
        ...                4.71, np.nan])
        >>> s
        0              NaN
        1       single_one
        2              NaN
        3    fill_two_more
        4              NaN
        5              NaN
        6              NaN
        7             4.71
        8              NaN
        dtype: object
        >>> s.interpolate(method='pad', limit=2)
        0              NaN
        1       single_one
        2       single_one
        3    fill_two_more
        4    fill_two_more
        5    fill_two_more
        6              NaN
        7             4.71
        8             4.71
        dtype: object
        Filling in ``NaN`` in a Series via polynomial interpolation or splines:
        Both 'polynomial' and 'spline' methods require that you also specify
        an ``order`` (int).
        >>> s = pd.Series([0, 2, np.nan, 8])
        >>> s.interpolate(method='polynomial', order=2)
        0    0.000000
        1    2.000000
        2    4.666667
        3    8.000000
        dtype: float64
        Fill the DataFrame forward (that is, going down) along each column
        using linear interpolation.
        Note how the last entry in column 'a' is interpolated differently,
        because there is no entry after it to use for interpolation.
        Note how the first entry in column 'b' remains ``NaN``, because there
        is no entry before it to use for interpolation.
        >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
        ...                    (np.nan, 2.0, np.nan, np.nan),
        ...                    (2.0, 3.0, np.nan, 9.0),
        ...                    (np.nan, 4.0, -4.0, 16.0)],
        ...                   columns=list('abcd'))
        >>> df
             a    b    c     d
        0  0.0  NaN -1.0   1.0
        1  NaN  2.0  NaN   NaN
        2  2.0  3.0  NaN   9.0
        3  NaN  4.0 -4.0  16.0
        >>> df.interpolate(method='linear', limit_direction='forward', axis=0)
             a    b    c     d
        0  0.0  NaN -1.0   1.0
        1  1.0  2.0 -2.0   5.0
        2  2.0  3.0 -3.0   9.0
        3  2.0  4.0 -4.0  16.0
        Using polynomial interpolation.
        >>> df['d'].interpolate(method='polynomial', order=2)
        0     1.0
        1     4.0
        2     9.0
        3    16.0
        Name: d, dtype: float64
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        axis = self._get_axis_number(axis)
        fillna_methods = ["ffill", "bfill", "pad", "backfill"]
        # Column-wise interpolation is implemented via transpose.
        should_transpose = axis == 1 and method not in fillna_methods
        obj = self.T if should_transpose else self
        if obj.empty:
            return self.copy()
        if method not in fillna_methods:
            axis = self._info_axis_number
        if isinstance(obj.index, MultiIndex) and method != "linear":
            raise ValueError(
                "Only `method=linear` interpolation is supported on MultiIndexes."
            )
        # Set `limit_direction` depending on `method`
        if limit_direction is None:
            limit_direction = (
                "backward" if method in ("backfill", "bfill") else "forward"
            )
        else:
            if method in ("pad", "ffill") and limit_direction != "forward":
                raise ValueError(
                    f"`limit_direction` must be 'forward' for method `{method}`"
                )
            if method in ("backfill", "bfill") and limit_direction != "backward":
                raise ValueError(
                    f"`limit_direction` must be 'backward' for method `{method}`"
                )
        if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")):
            raise TypeError(
                "Cannot interpolate with all object-dtype columns "
                "in the DataFrame. Try setting at least one "
                "column to a numeric dtype."
            )
        # create/use the index
        if method == "linear":
            # prior default
            index = np.arange(len(obj.index))
            index = Index(index)
        else:
            index = obj.index
            methods = {"index", "values", "nearest", "time"}
            is_numeric_or_datetime = (
                is_numeric_dtype(index.dtype)
                or is_datetime64_any_dtype(index.dtype)
                or is_timedelta64_dtype(index.dtype)
            )
            if method not in methods and not is_numeric_or_datetime:
                raise ValueError(
                    "Index column must be numeric or datetime type when "
                    f"using {method} method other than linear. "
                    "Try setting a numeric or datetime index column before "
                    "interpolating."
                )
        if isna(index).any():
            raise NotImplementedError(
                "Interpolation with NaNs in the index "
                "has not been implemented. Try filling "
                "those NaNs before interpolating."
            )
        # Delegate the actual interpolation to the manager.
        new_data = obj._mgr.interpolate(
            method=method,
            axis=axis,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            inplace=inplace,
            downcast=downcast,
            **kwargs,
        )
        result = self._constructor(new_data)
        if should_transpose:
            # Undo the transpose applied for axis=1 interpolation.
            result = result.T
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="interpolate")
# ----------------------------------------------------------------------
# Timeseries methods Methods
    @final
    def asof(self, where, subset=None):
        """
        Return the last row(s) without any NaNs before `where`.
        The last row (for each element in `where`, if list) without any
        NaN is taken.
        In case of a :class:`~pandas.DataFrame`, the last row without NaN
        considering only the subset of columns (if not `None`)
        If there is no good value, NaN is returned for a Series or
        a Series of NaN values for a DataFrame
        Parameters
        ----------
        where : date or array-like of dates
            Date(s) before which the last row(s) are returned.
        subset : str or array-like of str, default `None`
            For DataFrame, if not `None`, only use these columns to
            check for NaNs.
        Returns
        -------
        scalar, Series, or DataFrame
            The return can be:
            * scalar : when `self` is a Series and `where` is a scalar
            * Series: when `self` is a Series and `where` is an array-like,
              or when `self` is a DataFrame and `where` is a scalar
            * DataFrame : when `self` is a DataFrame and `where` is an
              array-like
            Return scalar, Series, or DataFrame.
        See Also
        --------
        merge_asof : Perform an asof merge. Similar to left join.
        Notes
        -----
        Dates are assumed to be sorted. Raises if this is not the case.
        Examples
        --------
        A Series and a scalar `where`.
        >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
        >>> s
        10    1.0
        20    2.0
        30    NaN
        40    4.0
        dtype: float64
        >>> s.asof(20)
        2.0
        For a sequence `where`, a Series is returned. The first value is
        NaN, because the first element of `where` is before the first
        index value.
        >>> s.asof([5, 20])
        5     NaN
        20    2.0
        dtype: float64
        Missing values are not considered. The following is ``2.0``, not
        NaN, even though NaN is at the index location for ``30``.
        >>> s.asof(30)
        2.0
        Take all columns into consideration
        >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
        ...                    'b': [None, None, None, None, 500]},
        ...                   index=pd.DatetimeIndex(['2018-02-27 09:01:00',
        ...                                           '2018-02-27 09:02:00',
        ...                                           '2018-02-27 09:03:00',
        ...                                           '2018-02-27 09:04:00',
        ...                                           '2018-02-27 09:05:00']))
        >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
        ...                           '2018-02-27 09:04:30']))
                              a   b
        2018-02-27 09:03:30 NaN NaN
        2018-02-27 09:04:30 NaN NaN
        Take a single column into consideration
        >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
        ...                           '2018-02-27 09:04:30']),
        ...         subset=['a'])
                                a   b
        2018-02-27 09:03:30  30.0 NaN
        2018-02-27 09:04:30  40.0 NaN
        """
        # Accept a date string for `where` by parsing it to a Timestamp.
        if isinstance(where, str):
            where = Timestamp(where)
        if not self.index.is_monotonic:
            raise ValueError("asof requires a sorted index")
        is_series = isinstance(self, ABCSeries)
        if is_series:
            if subset is not None:
                raise ValueError("subset is not valid for Series")
        else:
            # DataFrame: default to checking for NaNs across all columns.
            if subset is None:
                subset = self.columns
            if not is_list_like(subset):
                subset = [subset]
        is_list = is_list_like(where)
        if not is_list:
            start = self.index[0]
            if isinstance(self.index, PeriodIndex):
                where = Period(where, freq=self.index.freq)
            # `where` precedes all data: there is nothing to return but NaN(s).
            if where < start:
                if not is_series:
                    return self._constructor_sliced(
                        index=self.columns, name=where, dtype=np.float64
                    )
                return np.nan
            # It's always much faster to use a *while* loop here for
            # Series than pre-computing all the NAs. However a
            # *while* loop is extremely expensive for DataFrame
            # so we later pre-compute all the NAs and use the same
            # code path whether *where* is a scalar or list.
            # See PR: https://github.com/pandas-dev/pandas/pull/14476
            if is_series:
                # Walk backwards from the insertion point to the last non-NA.
                loc = self.index.searchsorted(where, side="right")
                if loc > 0:
                    loc -= 1
                values = self._values
                while loc > 0 and isna(values[loc]):
                    loc -= 1
                return values[loc]
        # Normalize `where` to an Index so asof_locs can vectorize the lookup.
        if not isinstance(where, Index):
            where = Index(where) if is_list else Index([where])
        # Unusable rows: any-NA within `subset` (frame) or NA values (series).
        nulls = self.isna() if is_series else self[subset].isna().any(1)
        if nulls.all():
            # No usable rows at all -> all-NaN result of the right shape.
            if is_series:
                self = cast("Series", self)
                return self._constructor(np.nan, index=where, name=self.name)
            elif is_list:
                self = cast("DataFrame", self)
                return self._constructor(np.nan, index=where, columns=self.columns)
            else:
                self = cast("DataFrame", self)
                return self._constructor_sliced(
                    np.nan, index=self.columns, name=where[0]
                )
        # Positions of the last valid row at or before each `where` label
        # (-1 when no valid row precedes the label).
        locs = self.index.asof_locs(where, ~(nulls._values))
        # mask the missing
        missing = locs == -1
        data = self.take(locs)
        data.index = where
        data.loc[missing] = np.nan
        return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
    @doc(klass=_shared_doc_kwargs["klass"])
    def isna(self: FrameOrSeries) -> FrameOrSeries:
        """
        Detect missing values.
        Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or :attr:`numpy.NaN`, gets mapped to True
        values.
        Everything else gets mapped to False values. Characters such as empty
        strings ``''`` or :attr:`numpy.inf` are not considered NA values
        (unless you set ``pandas.options.mode.use_inf_as_na = True``).
        Returns
        -------
        {klass}
            Mask of bool values for each element in {klass} that
            indicates whether an element is an NA value.
        See Also
        --------
        {klass}.isnull : Alias of isna.
        {klass}.notna : Boolean inverse of isna.
        {klass}.dropna : Omit axes labels with missing values.
        isna : Top-level isna.
        Examples
        --------
        Show which entries in a DataFrame are NA.
        >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
        ...                    born=[pd.NaT, pd.Timestamp('1939-05-27'),
        ...                          pd.Timestamp('1940-04-25')],
        ...                    name=['Alfred', 'Batman', ''],
        ...                    toy=[None, 'Batmobile', 'Joker']))
        >>> df
           age       born    name        toy
        0  5.0        NaT  Alfred       None
        1  6.0 1939-05-27  Batman  Batmobile
        2  NaN 1940-04-25              Joker
        >>> df.isna()
             age   born   name    toy
        0  False   True  False   True
        1  False  False  False  False
        2   True  False  False  False
        Show which entries in a Series are NA.
        >>> ser = pd.Series([5, 6, np.NaN])
        >>> ser
        0    5.0
        1    6.0
        2    NaN
        dtype: float64
        >>> ser.isna()
        0    False
        1    False
        2     True
        dtype: bool
        """
        # Delegate to the top-level isna, then propagate metadata from self.
        return isna(self).__finalize__(self, method="isna")
@doc(isna, klass=_shared_doc_kwargs["klass"])
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self, method="isnull")
    @doc(klass=_shared_doc_kwargs["klass"])
    def notna(self: FrameOrSeries) -> FrameOrSeries:
        """
        Detect existing (non-missing) values.
        Return a boolean same-sized object indicating if the values are not NA.
        Non-missing values get mapped to True. Characters such as empty
        strings ``''`` or :attr:`numpy.inf` are not considered NA values
        (unless you set ``pandas.options.mode.use_inf_as_na = True``).
        NA values, such as None or :attr:`numpy.NaN`, get mapped to False
        values.
        Returns
        -------
        {klass}
            Mask of bool values for each element in {klass} that
            indicates whether an element is not an NA value.
        See Also
        --------
        {klass}.notnull : Alias of notna.
        {klass}.isna : Boolean inverse of notna.
        {klass}.dropna : Omit axes labels with missing values.
        notna : Top-level notna.
        Examples
        --------
        Show which entries in a DataFrame are not NA.
        >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
        ...                    born=[pd.NaT, pd.Timestamp('1939-05-27'),
        ...                          pd.Timestamp('1940-04-25')],
        ...                    name=['Alfred', 'Batman', ''],
        ...                    toy=[None, 'Batmobile', 'Joker']))
        >>> df
           age       born    name        toy
        0  5.0        NaT  Alfred       None
        1  6.0 1939-05-27  Batman  Batmobile
        2  NaN 1940-04-25              Joker
        >>> df.notna()
             age   born  name    toy
        0   True  False  True  False
        1   True   True  True   True
        2  False   True  True   True
        Show which entries in a Series are not NA.
        >>> ser = pd.Series([5, 6, np.NaN])
        >>> ser
        0    5.0
        1    6.0
        2    NaN
        dtype: float64
        >>> ser.notna()
        0     True
        1     True
        2    False
        dtype: bool
        """
        # Delegate to the top-level notna, then propagate metadata from self.
        return notna(self).__finalize__(self, method="notna")
@doc(notna, klass=_shared_doc_kwargs["klass"])
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self, method="notnull")
@final
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self._values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
return self._update_inplace(result)
else:
return result
@final
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = align_method_FRAME(self, threshold, axis, flex=None)[1]
# GH 40420
# Treat missing thresholds as no bounds, not clipping the values
if is_list_like(threshold):
fill_value = np.inf if method.__name__ == "le" else -np.inf
threshold_inf = threshold.fillna(fill_value)
else:
threshold_inf = threshold
subset = method(threshold_inf, axis=axis) | isna(self)
# GH 40420
return self.where(subset, threshold, axis=axis, inplace=inplace)
    def clip(
        self: FrameOrSeries,
        lower=None,
        upper=None,
        axis: Axis | None = None,
        inplace: bool_t = False,
        *args,
        **kwargs,
    ) -> FrameOrSeries | None:
        """
        Trim values at input threshold(s).
        Assigns values outside boundary to boundary values. Thresholds
        can be singular values or array like, and in the latter case
        the clipping is performed element-wise in the specified axis.
        Parameters
        ----------
        lower : float or array-like, default None
            Minimum threshold value. All values below this
            threshold will be set to it. A missing
            threshold (e.g `NA`) will not clip the value.
        upper : float or array-like, default None
            Maximum threshold value. All values above this
            threshold will be set to it. A missing
            threshold (e.g `NA`) will not clip the value.
        axis : int or str axis name, optional
            Align object with lower and upper along the given axis.
        inplace : bool, default False
            Whether to perform the operation in place on the data.
        *args, **kwargs
            Additional keywords have no effect but might be accepted
            for compatibility with numpy.
        Returns
        -------
        Series or DataFrame or None
            Same type as calling object with the values outside the
            clip boundaries replaced or None if ``inplace=True``.
        See Also
        --------
        Series.clip : Trim values at input threshold in series.
        DataFrame.clip : Trim values at input threshold in dataframe.
        numpy.clip : Clip (limit) the values in an array.
        Examples
        --------
        >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
        >>> df = pd.DataFrame(data)
        >>> df
           col_0  col_1
        0      9     -2
        1     -3     -7
        2      0      6
        3     -1      8
        4      5     -5
        Clips per column using lower and upper thresholds:
        >>> df.clip(-4, 6)
           col_0  col_1
        0      6     -2
        1     -3     -4
        2      0      6
        3     -1      6
        4      5     -4
        Clips using specific lower and upper thresholds per column element:
        >>> t = pd.Series([2, -4, -1, 6, 3])
        >>> t
        0    2
        1   -4
        2   -1
        3    6
        4    3
        dtype: int64
        >>> df.clip(t, t + 4, axis=0)
           col_0  col_1
        0      6      2
        1     -3     -4
        2      0      3
        3      6      8
        4      5      3
        Clips using specific lower threshold per column element, with missing values:
        >>> t = pd.Series([2, -4, np.NaN, 6, 3])
        >>> t
        0    2.0
        1   -4.0
        2    NaN
        3    6.0
        4    3.0
        dtype: float64
        >>> df.clip(t, axis=0)
           col_0  col_1
        0      9      2
        1     -3     -4
        2      0      6
        3      6      8
        4      5      3
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        axis = nv.validate_clip_with_axis(axis, args, kwargs)
        if axis is not None:
            axis = self._get_axis_number(axis)
        # GH 17276
        # numpy doesn't like NaN as a clip value
        # so ignore
        # GH 19992
        # numpy doesn't drop a list-like bound containing NaN
        # A scalar NaN bound, or a list-like bound that is *entirely* NaN,
        # means "no bound" on that side; partially-NaN list-likes are kept
        # and handled element-wise downstream.
        isna_lower = isna(lower)
        if not is_list_like(lower):
            if np.any(isna_lower):
                lower = None
        elif np.all(isna_lower):
            lower = None
        isna_upper = isna(upper)
        if not is_list_like(upper):
            if np.any(isna_upper):
                upper = None
        elif np.all(isna_upper):
            upper = None
        # GH 2747 (arguments were reversed)
        # Scalar bounds given in the wrong order are silently swapped.
        if (
            lower is not None
            and upper is not None
            and is_scalar(lower)
            and is_scalar(upper)
        ):
            lower, upper = min(lower, upper), max(lower, upper)
        # fast-path for scalars
        if (lower is None or (is_scalar(lower) and is_number(lower))) and (
            upper is None or (is_scalar(upper) and is_number(upper))
        ):
            return self._clip_with_scalar(lower, upper, inplace=inplace)
        result = self
        if lower is not None:
            result = result._clip_with_one_bound(
                lower, method=self.ge, axis=axis, inplace=inplace
            )
        if upper is not None:
            if inplace:
                # The lower clip already mutated self in place; re-anchor on it.
                result = self
            result = result._clip_with_one_bound(
                upper, method=self.le, axis=axis, inplace=inplace
            )
        return result
    @doc(**_shared_doc_kwargs)
    def asfreq(
        self: FrameOrSeries,
        freq,
        method=None,
        how: str | None = None,
        normalize: bool_t = False,
        fill_value=None,
    ) -> FrameOrSeries:
        """
        Convert time series to specified frequency.
        Returns the original data conformed to a new index with the specified
        frequency.
        If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index
        is the result of transforming the original index with
        :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index
        will map one-to-one to the new index).
        Otherwise, the new index will be equivalent to ``pd.date_range(start, end,
        freq=freq)`` where ``start`` and ``end`` are, respectively, the first and
        last entries in the original index (see :func:`pandas.date_range`). The
        values corresponding to any timesteps in the new index which were not present
        in the original index will be null (``NaN``), unless a method for filling
        such unknowns is provided (see the ``method`` parameter below).
        The :meth:`resample` method is more appropriate if an operation on each group of
        timesteps (such as an aggregate) is necessary to represent the data at the new
        frequency.
        Parameters
        ----------
        freq : DateOffset or str
            Frequency DateOffset or string.
        method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None
            Method to use for filling holes in reindexed Series (note this
            does not fill NaNs that already were present):
            * 'pad' / 'ffill': propagate last valid observation forward to next
              valid
            * 'backfill' / 'bfill': use NEXT valid observation to fill.
        how : {{'start', 'end'}}, default end
            For PeriodIndex only (see PeriodIndex.asfreq).
        normalize : bool, default False
            Whether to reset output index to midnight.
        fill_value : scalar, optional
            Value to use for missing values, applied during upsampling (note
            this does not fill NaNs that already were present).
        Returns
        -------
        {klass}
            {klass} object reindexed to the specified frequency.
        See Also
        --------
        reindex : Conform DataFrame to new index with optional filling logic.
        Notes
        -----
        To learn more about the frequency strings, please see `this link
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
        Examples
        --------
        Start by creating a series with 4 one minute timestamps.
        >>> index = pd.date_range('1/1/2000', periods=4, freq='T')
        >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
        >>> df = pd.DataFrame({{'s': series}})
        >>> df
                               s
        2000-01-01 00:00:00    0.0
        2000-01-01 00:01:00    NaN
        2000-01-01 00:02:00    2.0
        2000-01-01 00:03:00    3.0
        Upsample the series into 30 second bins.
        >>> df.asfreq(freq='30S')
                               s
        2000-01-01 00:00:00    0.0
        2000-01-01 00:00:30    NaN
        2000-01-01 00:01:00    NaN
        2000-01-01 00:01:30    NaN
        2000-01-01 00:02:00    2.0
        2000-01-01 00:02:30    NaN
        2000-01-01 00:03:00    3.0
        Upsample again, providing a ``fill value``.
        >>> df.asfreq(freq='30S', fill_value=9.0)
                               s
        2000-01-01 00:00:00    0.0
        2000-01-01 00:00:30    9.0
        2000-01-01 00:01:00    NaN
        2000-01-01 00:01:30    9.0
        2000-01-01 00:02:00    2.0
        2000-01-01 00:02:30    9.0
        2000-01-01 00:03:00    3.0
        Upsample again, providing a ``method``.
        >>> df.asfreq(freq='30S', method='bfill')
                               s
        2000-01-01 00:00:00    0.0
        2000-01-01 00:00:30    NaN
        2000-01-01 00:01:00    NaN
        2000-01-01 00:01:30    2.0
        2000-01-01 00:02:00    2.0
        2000-01-01 00:02:30    3.0
        2000-01-01 00:03:00    3.0
        """
        # Imported locally to avoid a circular import with pandas.core.resample.
        from pandas.core.resample import asfreq
        # All the actual reindexing work is delegated to the resample helper.
        return asfreq(
            self,
            freq,
            method=method,
            how=how,
            normalize=normalize,
            fill_value=fill_value,
        )
@final
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_at_time(time, asof=asof)
return self._take_with_is_copy(indexer, axis=axis)
@final
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_between_time(
start_time, end_time, include_start=include_start, include_end=include_end
)
return self._take_with_is_copy(indexer, axis=axis)
    @doc(**_shared_doc_kwargs)
    def resample(
        self,
        rule,
        axis=0,
        closed: str | None = None,
        label: str | None = None,
        convention: str = "start",
        kind: str | None = None,
        loffset=None,
        base: int | None = None,
        on=None,
        level=None,
        origin: str | TimestampConvertibleTypes = "start_day",
        offset: TimedeltaConvertibleTypes | None = None,
    ) -> Resampler:
        """
        Resample time-series data.
        Convenience method for frequency conversion and resampling of time series.
        The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`,
        or `TimedeltaIndex`), or the caller must pass the label of a datetime-like
        series/index to the ``on``/``level`` keyword parameter.
        Parameters
        ----------
        rule : DateOffset, Timedelta or str
            The offset string or object representing target conversion.
        axis : {{0 or 'index', 1 or 'columns'}}, default 0
            Which axis to use for up- or down-sampling. For `Series` this
            will default to 0, i.e. along the rows. Must be
            `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
        closed : {{'right', 'left'}}, default None
            Which side of bin interval is closed. The default is 'left'
            for all frequency offsets except for 'M', 'A', 'Q', 'BM',
            'BA', 'BQ', and 'W' which all have a default of 'right'.
        label : {{'right', 'left'}}, default None
            Which bin edge label to label bucket with. The default is 'left'
            for all frequency offsets except for 'M', 'A', 'Q', 'BM',
            'BA', 'BQ', and 'W' which all have a default of 'right'.
        convention : {{'start', 'end', 's', 'e'}}, default 'start'
            For `PeriodIndex` only, controls whether to use the start or
            end of `rule`.
        kind : {{'timestamp', 'period'}}, optional, default None
            Pass 'timestamp' to convert the resulting index to a
            `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
            By default the input representation is retained.
        loffset : timedelta, default None
            Adjust the resampled time labels.
            .. deprecated:: 1.1.0
                You should add the loffset to the `df.index` after the resample.
                See below.
        base : int, default 0
            For frequencies that evenly subdivide 1 day, the "origin" of the
            aggregated intervals. For example, for '5min' frequency, base could
            range from 0 through 4. Defaults to 0.
            .. deprecated:: 1.1.0
                The new arguments that you should use are 'offset' or 'origin'.
        on : str, optional
            For a DataFrame, column to use instead of index for resampling.
            Column must be datetime-like.
        level : str or int, optional
            For a MultiIndex, level (name or number) to use for
            resampling. `level` must be datetime-like.
        origin : {{'epoch', 'start', 'start_day', 'end', 'end_day'}}, Timestamp
            or str, default 'start_day'
            The timestamp on which to adjust the grouping. The timezone of origin
            must match the timezone of the index.
            If a timestamp is not used, these values are also supported:
            - 'epoch': `origin` is 1970-01-01
            - 'start': `origin` is the first value of the timeseries
            - 'start_day': `origin` is the first day at midnight of the timeseries
            .. versionadded:: 1.1.0
            - 'end': `origin` is the last value of the timeseries
            - 'end_day': `origin` is the ceiling midnight of the last day
            .. versionadded:: 1.3.0
        offset : Timedelta or str, default is None
            An offset timedelta added to the origin.
            .. versionadded:: 1.1.0
        Returns
        -------
        pandas.core.Resampler
            :class:`~pandas.core.Resampler` object.
        See Also
        --------
        Series.resample : Resample a Series.
        DataFrame.resample : Resample a DataFrame.
        groupby : Group {klass} by mapping, function, label, or list of labels.
        asfreq : Reindex a {klass} with the given frequency without grouping.
        Notes
        -----
        See the `user guide
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__
        for more.
        To learn more about the offset strings, please see `this link
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
        Examples
        --------
        Start by creating a series with 9 one minute timestamps.
        >>> index = pd.date_range('1/1/2000', periods=9, freq='T')
        >>> series = pd.Series(range(9), index=index)
        >>> series
        2000-01-01 00:00:00    0
        2000-01-01 00:01:00    1
        2000-01-01 00:02:00    2
        2000-01-01 00:03:00    3
        2000-01-01 00:04:00    4
        2000-01-01 00:05:00    5
        2000-01-01 00:06:00    6
        2000-01-01 00:07:00    7
        2000-01-01 00:08:00    8
        Freq: T, dtype: int64
        Downsample the series into 3 minute bins and sum the values
        of the timestamps falling into a bin.
        >>> series.resample('3T').sum()
        2000-01-01 00:00:00     3
        2000-01-01 00:03:00    12
        2000-01-01 00:06:00    21
        Freq: 3T, dtype: int64
        Downsample the series into 3 minute bins as above, but label each
        bin using the right edge instead of the left. Please note that the
        value in the bucket used as the label is not included in the bucket,
        which it labels. For example, in the original series the
        bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
        value in the resampled bucket with the label ``2000-01-01 00:03:00``
        does not include 3 (if it did, the summed value would be 6, not 3).
        To include this value close the right side of the bin interval as
        illustrated in the example below this one.
        >>> series.resample('3T', label='right').sum()
        2000-01-01 00:03:00     3
        2000-01-01 00:06:00    12
        2000-01-01 00:09:00    21
        Freq: 3T, dtype: int64
        Downsample the series into 3 minute bins as above, but close the right
        side of the bin interval.
        >>> series.resample('3T', label='right', closed='right').sum()
        2000-01-01 00:00:00     0
        2000-01-01 00:03:00     6
        2000-01-01 00:06:00    15
        2000-01-01 00:09:00    15
        Freq: 3T, dtype: int64
        Upsample the series into 30 second bins.
        >>> series.resample('30S').asfreq()[0:5]   # Select first 5 rows
        2000-01-01 00:00:00   0.0
        2000-01-01 00:00:30   NaN
        2000-01-01 00:01:00   1.0
        2000-01-01 00:01:30   NaN
        2000-01-01 00:02:00   2.0
        Freq: 30S, dtype: float64
        Upsample the series into 30 second bins and fill the ``NaN``
        values using the ``pad`` method.
        >>> series.resample('30S').pad()[0:5]
        2000-01-01 00:00:00    0
        2000-01-01 00:00:30    0
        2000-01-01 00:01:00    1
        2000-01-01 00:01:30    1
        2000-01-01 00:02:00    2
        Freq: 30S, dtype: int64
        Upsample the series into 30 second bins and fill the
        ``NaN`` values using the ``bfill`` method.
        >>> series.resample('30S').bfill()[0:5]
        2000-01-01 00:00:00    0
        2000-01-01 00:00:30    1
        2000-01-01 00:01:00    1
        2000-01-01 00:01:30    2
        2000-01-01 00:02:00    2
        Freq: 30S, dtype: int64
        Pass a custom function via ``apply``
        >>> def custom_resampler(arraylike):
        ...     return np.sum(arraylike) + 5
        ...
        >>> series.resample('3T').apply(custom_resampler)
        2000-01-01 00:00:00     8
        2000-01-01 00:03:00    17
        2000-01-01 00:06:00    26
        Freq: 3T, dtype: int64
        For a Series with a PeriodIndex, the keyword `convention` can be
        used to control whether to use the start or end of `rule`.
        Resample a year by quarter using 'start' `convention`. Values are
        assigned to the first quarter of the period.
        >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
        ...                                             freq='A',
        ...                                             periods=2))
        >>> s
        2012    1
        2013    2
        Freq: A-DEC, dtype: int64
        >>> s.resample('Q', convention='start').asfreq()
        2012Q1    1.0
        2012Q2    NaN
        2012Q3    NaN
        2012Q4    NaN
        2013Q1    2.0
        2013Q2    NaN
        2013Q3    NaN
        2013Q4    NaN
        Freq: Q-DEC, dtype: float64
        Resample quarters by month using 'end' `convention`. Values are
        assigned to the last month of the period.
        >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
        ...                                                   freq='Q',
        ...                                                   periods=4))
        >>> q
        2018Q1    1
        2018Q2    2
        2018Q3    3
        2018Q4    4
        Freq: Q-DEC, dtype: int64
        >>> q.resample('M', convention='end').asfreq()
        2018-03    1.0
        2018-04    NaN
        2018-05    NaN
        2018-06    2.0
        2018-07    NaN
        2018-08    NaN
        2018-09    3.0
        2018-10    NaN
        2018-11    NaN
        2018-12    4.0
        Freq: M, dtype: float64
        For DataFrame objects, the keyword `on` can be used to specify the
        column instead of the index for resampling.
        >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
        ...      'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
        >>> df = pd.DataFrame(d)
        >>> df['week_starting'] = pd.date_range('01/01/2018',
        ...                                     periods=8,
        ...                                     freq='W')
        >>> df
           price  volume week_starting
        0     10      50    2018-01-07
        1     11      60    2018-01-14
        2      9      40    2018-01-21
        3     13     100    2018-01-28
        4     14      50    2018-02-04
        5     18     100    2018-02-11
        6     17      40    2018-02-18
        7     19      50    2018-02-25
        >>> df.resample('M', on='week_starting').mean()
                       price  volume
        week_starting
        2018-01-31     10.75    62.5
        2018-02-28     17.00    60.0
        For a DataFrame with MultiIndex, the keyword `level` can be used to
        specify on which level the resampling needs to take place.
        >>> days = pd.date_range('1/1/2000', periods=4, freq='D')
        >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
        ...       'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
        >>> df2 = pd.DataFrame(
        ...     d2,
        ...     index=pd.MultiIndex.from_product(
        ...         [days, ['morning', 'afternoon']]
        ...     )
        ... )
        >>> df2
                              price  volume
        2000-01-01 morning       10      50
                   afternoon     11      60
        2000-01-02 morning        9      40
                   afternoon     13     100
        2000-01-03 morning       14      50
                   afternoon     18     100
        2000-01-04 morning       17      40
                   afternoon     19      50
        >>> df2.resample('D', level=0).sum()
                    price  volume
        2000-01-01     21     110
        2000-01-02     22     140
        2000-01-03     32     150
        2000-01-04     36      90
        If you want to adjust the start of the bins based on a fixed timestamp:
        >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
        >>> rng = pd.date_range(start, end, freq='7min')
        >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
        >>> ts
        2000-10-01 23:30:00     0
        2000-10-01 23:37:00     3
        2000-10-01 23:44:00     6
        2000-10-01 23:51:00     9
        2000-10-01 23:58:00    12
        2000-10-02 00:05:00    15
        2000-10-02 00:12:00    18
        2000-10-02 00:19:00    21
        2000-10-02 00:26:00    24
        Freq: 7T, dtype: int64
        >>> ts.resample('17min').sum()
        2000-10-01 23:14:00     0
        2000-10-01 23:31:00     9
        2000-10-01 23:48:00    21
        2000-10-02 00:05:00    54
        2000-10-02 00:22:00    24
        Freq: 17T, dtype: int64
        >>> ts.resample('17min', origin='epoch').sum()
        2000-10-01 23:18:00     0
        2000-10-01 23:35:00    18
        2000-10-01 23:52:00    27
        2000-10-02 00:09:00    39
        2000-10-02 00:26:00    24
        Freq: 17T, dtype: int64
        >>> ts.resample('17min', origin='2000-01-01').sum()
        2000-10-01 23:24:00     3
        2000-10-01 23:41:00    15
        2000-10-01 23:58:00    45
        2000-10-02 00:15:00    45
        Freq: 17T, dtype: int64
        If you want to adjust the start of the bins with an `offset` Timedelta, the two
        following lines are equivalent:
        >>> ts.resample('17min', origin='start').sum()
        2000-10-01 23:30:00     9
        2000-10-01 23:47:00    21
        2000-10-02 00:04:00    54
        2000-10-02 00:21:00    24
        Freq: 17T, dtype: int64
        >>> ts.resample('17min', offset='23h30min').sum()
        2000-10-01 23:30:00     9
        2000-10-01 23:47:00    21
        2000-10-02 00:04:00    54
        2000-10-02 00:21:00    24
        Freq: 17T, dtype: int64
        If you want to take the largest Timestamp as the end of the bins:
        >>> ts.resample('17min', origin='end').sum()
        2000-10-01 23:35:00     0
        2000-10-01 23:52:00    18
        2000-10-02 00:09:00    27
        2000-10-02 00:26:00    63
        Freq: 17T, dtype: int64
        In contrast with the `start_day`, you can use `end_day` to take the ceiling
        midnight of the largest Timestamp as the end of the bins and drop the bins
        not containing data:
        >>> ts.resample('17min', origin='end_day').sum()
        2000-10-01 23:38:00     3
        2000-10-01 23:55:00    15
        2000-10-02 00:12:00    45
        2000-10-02 00:29:00    45
        Freq: 17T, dtype: int64
        To replace the use of the deprecated `base` argument, you can now use `offset`,
        in this example it is equivalent to have `base=2`:
        >>> ts.resample('17min', offset='2min').sum()
        2000-10-01 23:16:00     0
        2000-10-01 23:33:00     9
        2000-10-01 23:50:00    36
        2000-10-02 00:07:00    39
        2000-10-02 00:24:00    24
        Freq: 17T, dtype: int64
        To replace the use of the deprecated `loffset` argument:
        >>> from pandas.tseries.frequencies import to_offset
        >>> loffset = '19min'
        >>> ts_out = ts.resample('17min').sum()
        >>> ts_out.index = ts_out.index + to_offset(loffset)
        >>> ts_out
        2000-10-01 23:33:00     0
        2000-10-01 23:50:00     9
        2000-10-02 00:07:00    21
        2000-10-02 00:24:00    54
        2000-10-02 00:41:00    24
        Freq: 17T, dtype: int64
        """
        # Imported locally to avoid a circular import with pandas.core.resample.
        from pandas.core.resample import get_resampler
        axis = self._get_axis_number(axis)
        # Construction is lazy: this returns a groupby-like Resampler object;
        # no aggregation happens until a method is called on it.
        return get_resampler(
            self,
            freq=rule,
            label=label,
            closed=closed,
            axis=axis,
            kind=kind,
            loffset=loffset,
            convention=convention,
            base=base,
            key=on,
            level=level,
            origin=origin,
            offset=offset,
        )
@final
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select initial periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the first few rows based on a date offset.
Parameters
----------
offset : str, DateOffset or dateutil.relativedelta
The offset length of the data that will be selected. For instance,
'1M' will display all the rows having their index within the first month.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice the data for 3 first calendar days were returned, not the first
3 days observed in the dataset, and therefore data for 2018-04-13 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]):
# GH#29623 if first value is end of period, remove offset with n = 1
# before adding the real offset
end_date = end = self.index[0] - offset.base + offset
else:
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if isinstance(offset, Tick) and end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
@final
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select final periods of time series data based on a date offset.
For a DataFrame with a sorted DatetimeIndex, this function
selects the last few rows based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice the data for 3 last calendar days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
# error: Slice index must be an integer or None
return self.iloc[start:] # type: ignore[misc]
@final
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: bool_t | None = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign lowest rank to NaN values
* bottom: assign highest rank to NaN values
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
# error: Argument 1 to "NDFrame" has incompatible type "ndarray"; expected
# "Union[ArrayManager, BlockManager]"
ranks_obj = self._constructor(
ranks, **data._construct_axes_dict() # type: ignore[arg-type]
)
return ranks_obj.__finalize__(self, method="rank")
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
    @doc(_shared_docs["compare"], klass=_shared_doc_kwargs["klass"])
    def compare(
        self,
        other,
        align_axis: Axis = 1,
        keep_shape: bool_t = False,
        keep_equal: bool_t = False,
    ):
        # Shared implementation for Series.compare / DataFrame.compare: show
        # the elementwise differences between `self` and `other`, stacked
        # along `align_axis` under the labels "self"/"other".
        from pandas.core.reshape.concat import concat
        if type(self) is not type(other):
            cls_self, cls_other = type(self).__name__, type(other).__name__
            raise TypeError(
                f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'"
            )
        # True where the two objects differ; matching NaNs count as equal.
        mask = ~((self == other) | (self.isna() & other.isna()))
        keys = ["self", "other"]
        if not keep_equal:
            # blank out (NaN) the positions where the two objects agree
            self = self.where(mask)
            other = other.where(mask)
        if not keep_shape:
            if isinstance(self, ABCDataFrame):
                # keep only rows/columns containing at least one difference
                cmask = mask.any()
                rmask = mask.any(axis=1)
                self = self.loc[rmask, cmask]
                other = other.loc[rmask, cmask]
            else:
                self = self[mask]
                other = other[mask]
        if align_axis in (1, "columns"):  # This is needed for Series
            axis = 1
        else:
            axis = self._get_axis_number(align_axis)
        diff = concat([self, other], axis=axis, keys=keys)
        if axis >= self.ndim:
            # No need to reorganize data if stacking on new axis
            # This currently applies for stacking two Series on columns
            return diff
        ax = diff._get_axis(axis)
        ax_names = np.array(ax.names)
        # set index names to positions to avoid confusion
        ax.names = np.arange(len(ax_names))
        # bring self-other to inner level
        order = list(range(1, ax.nlevels)) + [0]
        if isinstance(diff, ABCDataFrame):
            diff = diff.reorder_levels(order, axis=axis)
        else:
            diff = diff.reorder_levels(order)
        # restore the index names in order
        diff._get_axis(axis=axis).names = ax_names[order]
        # reorder axis to keep things organized: interleave the "self" and
        # "other" halves so each original label's pair sits adjacent
        indices = (
            np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten()
        )
        diff = diff.take(indices, axis=axis)
        return diff
    @doc(**_shared_doc_kwargs)
    def align(
        self,
        other,
        join="outer",
        axis=None,
        level=None,
        copy=True,
        fill_value=None,
        method=None,
        limit=None,
        fill_axis=0,
        broadcast_axis=None,
    ):
        """
        Align two objects on their axes with the specified join method.
        Join method is specified for each axis Index.
        Parameters
        ----------
        other : DataFrame or Series
        join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
        axis : allowed axis of the other object, default None
            Align on index (0), columns (1), or both (None).
        level : int or level name, default None
            Broadcast across a level, matching Index values on the
            passed MultiIndex level.
        copy : bool, default True
            Always returns new objects. If copy=False and no reindexing is
            required then original objects are returned.
        fill_value : scalar, default np.NaN
            Value to use for missing values. Defaults to NaN, but can be any
            "compatible" value.
        method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
            Method to use for filling holes in reindexed Series:
            - pad / ffill: propagate last valid observation forward to next valid.
            - backfill / bfill: use NEXT valid observation to fill gap.
        limit : int, default None
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill. In other words, if there is
            a gap with more than this number of consecutive NaNs, it will only
            be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled. Must be greater than 0 if not None.
        fill_axis : {axes_single_arg}, default 0
            Filling axis, method and limit.
        broadcast_axis : {axes_single_arg}, default None
            Broadcast values along this axis, if aligning two objects of
            different dimensions.
        Returns
        -------
        (left, right) : ({klass}, type of other)
            Aligned objects.
        """
        method = missing.clean_fill_method(method)
        # broadcast_axis=1 with mixed dimensions: lift the Series operand to a
        # DataFrame by repeating it across the other operand's columns, then
        # align frame-to-frame.
        if broadcast_axis == 1 and self.ndim != other.ndim:
            if isinstance(self, ABCSeries):
                # this means other is a DataFrame, and we need to broadcast
                # self
                cons = self._constructor_expanddim
                df = cons(
                    {c: self for c in other.columns}, **other._construct_axes_dict()
                )
                return df._align_frame(
                    other,
                    join=join,
                    axis=axis,
                    level=level,
                    copy=copy,
                    fill_value=fill_value,
                    method=method,
                    limit=limit,
                    fill_axis=fill_axis,
                )
            elif isinstance(other, ABCSeries):
                # this means self is a DataFrame, and we need to broadcast
                # other
                cons = other._constructor_expanddim
                df = cons(
                    {c: other for c in self.columns}, **self._construct_axes_dict()
                )
                return self._align_frame(
                    df,
                    join=join,
                    axis=axis,
                    level=level,
                    copy=copy,
                    fill_value=fill_value,
                    method=method,
                    limit=limit,
                    fill_axis=fill_axis,
                )
        if axis is not None:
            axis = self._get_axis_number(axis)
        # Dispatch on the type of `other`: frame alignment handles both axes,
        # series alignment handles a single shared axis.
        if isinstance(other, ABCDataFrame):
            return self._align_frame(
                other,
                join=join,
                axis=axis,
                level=level,
                copy=copy,
                fill_value=fill_value,
                method=method,
                limit=limit,
                fill_axis=fill_axis,
            )
        elif isinstance(other, ABCSeries):
            return self._align_series(
                other,
                join=join,
                axis=axis,
                level=level,
                copy=copy,
                fill_value=fill_value,
                method=method,
                limit=limit,
                fill_axis=fill_axis,
            )
        else:  # pragma: no cover
            raise TypeError(f"unsupported type: {type(other)}")
    @final
    def _align_frame(
        self,
        other,
        join="outer",
        axis=None,
        level=None,
        copy: bool_t = True,
        fill_value=None,
        method=None,
        limit=None,
        fill_axis=0,
    ):
        """
        Align `self` with a DataFrame `other`; shared backend for `align`.
        Returns the aligned ``(left, right)`` pair; see `align` for the
        parameter semantics.
        """
        # defaults
        join_index, join_columns = None, None
        ilidx, iridx = None, None
        clidx, cridx = None, None
        is_series = isinstance(self, ABCSeries)
        # Join on rows unless the caller restricted alignment to axis=1; a
        # None join_index means the indexes already match.
        if (axis is None or axis == 0) and not self.index.equals(other.index):
            join_index, ilidx, iridx = self.index.join(
                other.index, how=join, level=level, return_indexers=True
            )
        # Join on columns unless restricted to axis=0; a Series has no
        # columns to align.
        if (
            (axis is None or axis == 1)
            and not is_series
            and not self.columns.equals(other.columns)
        ):
            join_columns, clidx, cridx = self.columns.join(
                other.columns, how=join, level=level, return_indexers=True
            )
        if is_series:
            reindexers = {0: [join_index, ilidx]}
        else:
            reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
        left = self._reindex_with_indexers(
            reindexers, copy=copy, fill_value=fill_value, allow_dups=True
        )
        # other must be always DataFrame
        right = other._reindex_with_indexers(
            {0: [join_index, iridx], 1: [join_columns, cridx]},
            copy=copy,
            fill_value=fill_value,
            allow_dups=True,
        )
        if method is not None:
            _left = left.fillna(method=method, axis=fill_axis, limit=limit)
            assert _left is not None  # needed for mypy
            left = _left
            right = right.fillna(method=method, axis=fill_axis, limit=limit)
        # if DatetimeIndex have different tz, convert to UTC
        left, right = _align_as_utc(left, right, join_index)
        return (
            left.__finalize__(self),
            right.__finalize__(other),
        )
@final
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._mgr
if axis in [0, 1]:
join_index = self.axes[axis]
lidx, ridx = None, None
if not join_index.equals(other.index):
join_index, lidx, ridx = join_index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
bm_axis = self._get_block_manager_axis(axis)
fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._mgr:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
left, right = _align_as_utc(left, right, join_index)
return (
left.__finalize__(self),
right.__finalize__(other),
)
    @final
    def _where(
        self,
        cond,
        other=np.nan,
        inplace=False,
        axis=None,
        level=None,
        errors="raise",
    ):
        """
        Equivalent to public method `where`, except that `other` is not
        applied as a function even if callable. Used in __setitem__.
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        if axis is not None:
            axis = self._get_axis_number(axis)
        # align the cond to same shape as myself
        cond = com.apply_if_callable(cond, self)
        if isinstance(cond, NDFrame):
            cond, _ = cond.align(self, join="right", broadcast_axis=1, copy=False)
        else:
            # raw array-likes must match self's shape exactly
            if not hasattr(cond, "shape"):
                cond = np.asanyarray(cond)
            if cond.shape != self.shape:
                raise ValueError("Array conditional must be same shape as self")
            cond = self._constructor(cond, **self._construct_axes_dict())
        # make sure we are boolean
        # NaNs in the mask become True for inplace (keep existing value) and
        # False otherwise (replace with `other`).
        fill_value = bool(inplace)
        cond = cond.fillna(fill_value)
        msg = "Boolean array expected for the condition, not {dtype}"
        if not cond.empty:
            if not isinstance(cond, ABCDataFrame):
                # This is a single-dimensional object.
                if not is_bool_dtype(cond):
                    raise ValueError(msg.format(dtype=cond.dtype))
            else:
                for dt in cond.dtypes:
                    if not is_bool_dtype(dt):
                        raise ValueError(msg.format(dtype=dt))
        else:
            # GH#21947 we have an empty DataFrame/Series, could be object-dtype
            cond = cond.astype(bool)
        # inplace path uses putmask semantics, so the mask is inverted here
        cond = -cond if inplace else cond
        cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False)
        # try to align with other
        if isinstance(other, NDFrame):
            # align with me
            if other.ndim <= self.ndim:
                _, other = self.align(
                    other,
                    join="left",
                    axis=axis,
                    level=level,
                    fill_value=None,
                    copy=False,
                )
                # if we are NOT aligned, raise as we cannot where index
                if axis is None and not other._indexed_same(self):
                    raise InvalidIndexError
                elif other.ndim < self.ndim:
                    # TODO(EA2D): avoid object-dtype cast in EA case GH#38729
                    other = other._values
                    if axis == 0:
                        other = np.reshape(other, (-1, 1))
                    elif axis == 1:
                        other = np.reshape(other, (1, -1))
                    other = np.broadcast_to(other, self.shape)
            # slice me out of the other
            else:
                raise NotImplementedError(
                    "cannot align with a higher dimensional NDFrame"
                )
        elif not isinstance(other, (MultiIndex, NDFrame)):
            # mainly just catching Index here
            other = extract_array(other, extract_numpy=True)
        if isinstance(other, (np.ndarray, ExtensionArray)):
            if other.shape != self.shape:
                if self.ndim != 1:
                    # In the ndim == 1 case we may have
                    # other length 1, which we treat as scalar (GH#2745, GH#4192)
                    # or len(other) == icond.sum(), which we treat like
                    # __setitem__ (GH#3235)
                    raise ValueError(
                        "other must be the same shape as self when an ndarray"
                    )
            # we are the same shape, so create an actual object for alignment
            else:
                # error: Argument 1 to "NDFrame" has incompatible type "ndarray";
                # expected "BlockManager"
                other = self._constructor(
                    other, **self._construct_axes_dict()  # type: ignore[arg-type]
                )
        if axis is None:
            axis = 0
        # align at the block-manager level only when dimensions differ along
        # the columns axis
        if self.ndim == getattr(other, "ndim", 0):
            align = True
        else:
            align = self._get_axis_number(axis) == 1
        if inplace:
            # we may have different type blocks come out of putmask, so
            # reconstruct the block manager
            self._check_inplace_setting(other)
            new_data = self._mgr.putmask(mask=cond, new=other, align=align)
            result = self._constructor(new_data)
            return self._update_inplace(result)
        else:
            new_data = self._mgr.where(
                other=other,
                cond=cond,
                align=align,
                errors=errors,
            )
            result = self._constructor(new_data)
            return result.__finalize__(self)
    @doc(
        klass=_shared_doc_kwargs["klass"],
        cond="True",
        cond_rev="False",
        name="where",
        name_other="mask",
    )
    def where(
        self,
        cond,
        other=np.nan,
        inplace=False,
        axis=None,
        level=None,
        errors="raise",
        try_cast=lib.no_default,
    ):
        """
        Replace values where the condition is {cond_rev}.
        Parameters
        ----------
        cond : bool {klass}, array-like, or callable
            Where `cond` is {cond}, keep the original value. Where
            {cond_rev}, replace with corresponding value from `other`.
            If `cond` is callable, it is computed on the {klass} and
            should return boolean {klass} or array. The callable must
            not change input {klass} (though pandas doesn't check it).
        other : scalar, {klass}, or callable
            Entries where `cond` is {cond_rev} are replaced with
            corresponding value from `other`.
            If other is callable, it is computed on the {klass} and
            should return scalar or {klass}. The callable must not
            change input {klass} (though pandas doesn't check it).
        inplace : bool, default False
            Whether to perform the operation in place on the data.
        axis : int, default None
            Alignment axis if needed.
        level : int, default None
            Alignment level if needed.
        errors : str, {{'raise', 'ignore'}}, default 'raise'
            Note that currently this parameter won't affect
            the results and will always coerce to a suitable dtype.
            - 'raise' : allow exceptions to be raised.
            - 'ignore' : suppress exceptions. On error return original object.
        try_cast : bool, default None
            Try to cast the result back to the input type (if possible).
            .. deprecated:: 1.3.0
                Manually cast back if necessary.
        Returns
        -------
        Same type as caller or None if ``inplace=True``.
        See Also
        --------
        :func:`DataFrame.{name_other}` : Return an object of same shape as
            self.
        Notes
        -----
        The {name} method is an application of the if-then idiom. For each
        element in the calling DataFrame, if ``cond`` is ``{cond}`` the
        element is used; otherwise the corresponding element from the DataFrame
        ``other`` is used.
        The signature for :func:`DataFrame.where` differs from
        :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
        ``np.where(m, df1, df2)``.
        For further details and examples see the ``{name}`` documentation in
        :ref:`indexing <indexing.where_mask>`.
        Examples
        --------
        >>> s = pd.Series(range(5))
        >>> s.where(s > 0)
        0    NaN
        1    1.0
        2    2.0
        3    3.0
        4    4.0
        dtype: float64
        >>> s.mask(s > 0)
        0    0.0
        1    NaN
        2    NaN
        3    NaN
        4    NaN
        dtype: float64
        >>> s.where(s > 1, 10)
        0    10
        1    10
        2    2
        3    3
        4    4
        dtype: int64
        >>> s.mask(s > 1, 10)
        0    0
        1    1
        2    10
        3    10
        4    10
        dtype: int64
        >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
        >>> df
           A  B
        0  0  1
        1  2  3
        2  4  5
        3  6  7
        4  8  9
        >>> m = df % 3 == 0
        >>> df.where(m, -df)
           A  B
        0  0 -1
        1 -2  3
        2 -4 -5
        3  6 -7
        4 -8  9
        >>> df.where(m, -df) == np.where(m, df, -df)
              A     B
        0  True  True
        1  True  True
        2  True  True
        3  True  True
        4  True  True
        >>> df.where(m, -df) == df.mask(~m, -df)
              A     B
        0  True  True
        1  True  True
        2  True  True
        3  True  True
        4  True  True
        """
        # Unlike `_where`, a callable `other` is evaluated here before
        # delegating; `_where` does the actual alignment and masking work.
        other = com.apply_if_callable(other, self)
        if try_cast is not lib.no_default:
            warnings.warn(
                "try_cast keyword is deprecated and will be removed in a "
                "future version",
                FutureWarning,
                stacklevel=4,
            )
        return self._where(cond, other, inplace, axis, level, errors=errors)
    @final
    @doc(
        where,
        klass=_shared_doc_kwargs["klass"],
        cond="False",
        cond_rev="True",
        name="mask",
        name_other="where",
    )
    def mask(
        self,
        cond,
        other=np.nan,
        inplace=False,
        axis=None,
        level=None,
        errors="raise",
        try_cast=lib.no_default,
    ):
        # Inverse of `where`: replace values where `cond` is True.  The
        # docstring is injected from `where` by the @doc decorator above.
        inplace = validate_bool_kwarg(inplace, "inplace")
        cond = com.apply_if_callable(cond, self)
        if try_cast is not lib.no_default:
            warnings.warn(
                "try_cast keyword is deprecated and will be removed in a "
                "future version",
                FutureWarning,
                stacklevel=4,
            )
        # see gh-21891
        # Plain sequences (e.g. lists) have no __invert__, so coerce to an
        # ndarray to make the ~cond below valid.
        if not hasattr(cond, "__invert__"):
            cond = np.array(cond)
        return self.where(
            ~cond,
            other=other,
            inplace=inplace,
            axis=axis,
            level=level,
            errors=errors,
        )
    @doc(klass=_shared_doc_kwargs["klass"])
    def shift(
        self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
    ) -> FrameOrSeries:
        """
        Shift index by desired number of periods with an optional time `freq`.
        When `freq` is not passed, shift the index without realigning the data.
        If `freq` is passed (in this case, the index must be date or datetime,
        or it will raise a `NotImplementedError`), the index will be
        increased using the periods and the `freq`. `freq` can be inferred
        when specified as "infer" as long as either freq or inferred_freq
        attribute is set in the index.
        Parameters
        ----------
        periods : int
            Number of periods to shift. Can be positive or negative.
        freq : DateOffset, tseries.offsets, timedelta, or str, optional
            Offset to use from the tseries module or time rule (e.g. 'EOM').
            If `freq` is specified then the index values are shifted but the
            data is not realigned. That is, use `freq` if you would like to
            extend the index when shifting and preserve the original data.
            If `freq` is specified as "infer" then it will be inferred from
            the freq or inferred_freq attributes of the index. If neither of
            those attributes exist, a ValueError is thrown.
        axis : {{0 or 'index', 1 or 'columns', None}}, default None
            Shift direction.
        fill_value : object, optional
            The scalar value to use for newly introduced missing values.
            the default depends on the dtype of `self`.
            For numeric data, ``np.nan`` is used.
            For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
            For extension dtypes, ``self.dtype.na_value`` is used.
            .. versionchanged:: 1.1.0
        Returns
        -------
        {klass}
            Copy of input object, shifted.
        See Also
        --------
        Index.shift : Shift values of Index.
        DatetimeIndex.shift : Shift values of DatetimeIndex.
        PeriodIndex.shift : Shift values of PeriodIndex.
        tshift : Shift the time index, using the index's frequency if
            available.
        Examples
        --------
        >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
        ...                    "Col2": [13, 23, 18, 33, 48],
        ...                    "Col3": [17, 27, 22, 37, 52]}},
        ...                   index=pd.date_range("2020-01-01", "2020-01-05"))
        >>> df
                    Col1  Col2  Col3
        2020-01-01    10    13    17
        2020-01-02    20    23    27
        2020-01-03    15    18    22
        2020-01-04    30    33    37
        2020-01-05    45    48    52
        >>> df.shift(periods=3)
                    Col1  Col2  Col3
        2020-01-01   NaN   NaN   NaN
        2020-01-02   NaN   NaN   NaN
        2020-01-03   NaN   NaN   NaN
        2020-01-04  10.0  13.0  17.0
        2020-01-05  20.0  23.0  27.0
        >>> df.shift(periods=1, axis="columns")
                    Col1  Col2  Col3
        2020-01-01   NaN    10    13
        2020-01-02   NaN    20    23
        2020-01-03   NaN    15    18
        2020-01-04   NaN    30    33
        2020-01-05   NaN    45    48
        >>> df.shift(periods=3, fill_value=0)
                    Col1  Col2  Col3
        2020-01-01     0     0     0
        2020-01-02     0     0     0
        2020-01-03     0     0     0
        2020-01-04    10    13    17
        2020-01-05    20    23    27
        >>> df.shift(periods=3, freq="D")
                    Col1  Col2  Col3
        2020-01-04    10    13    17
        2020-01-05    20    23    27
        2020-01-06    15    18    22
        2020-01-07    30    33    37
        2020-01-08    45    48    52
        >>> df.shift(periods=3, freq="infer")
                    Col1  Col2  Col3
        2020-01-04    10    13    17
        2020-01-05    20    23    27
        2020-01-06    15    18    22
        2020-01-07    30    33    37
        2020-01-08    45    48    52
        """
        if periods == 0:
            return self.copy()
        if freq is None:
            # when freq is None, data is shifted, index is not
            axis = self._get_axis_number(axis)
            new_data = self._mgr.shift(
                periods=periods, axis=axis, fill_value=fill_value
            )
            return self._constructor(new_data).__finalize__(self, method="shift")
        # when freq is given, index is shifted, data is not
        index = self._get_axis(axis)
        if freq == "infer":
            # try the explicit freq first, then the inferred one
            freq = getattr(index, "freq", None)
            if freq is None:
                freq = getattr(index, "inferred_freq", None)
            if freq is None:
                msg = "Freq was not set in the index hence cannot be inferred"
                raise ValueError(msg)
        elif isinstance(freq, str):
            freq = to_offset(freq)
        if isinstance(index, PeriodIndex):
            # a PeriodIndex can only be shifted by its own frequency
            orig_freq = to_offset(index.freq)
            if freq != orig_freq:
                assert orig_freq is not None  # for mypy
                raise ValueError(
                    f"Given freq {freq.rule_code} does not match "
                    f"PeriodIndex freq {orig_freq.rule_code}"
                )
            new_ax = index.shift(periods)
        else:
            new_ax = index.shift(periods, freq)
        result = self.set_axis(new_ax, axis=axis)
        return result.__finalize__(self, method="shift")
@final
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
.. deprecated:: 1.2.0
slice_shift is deprecated,
use DataFrame/Series.shift instead.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
"""
msg = (
"The 'slice_shift' method is deprecated "
"and will be removed in a future version. "
"You can use DataFrame/Series.shift instead"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self, method="slice_shift")
@final
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis: Axis = 0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
.. deprecated:: 1.1.0
Use `shift` instead.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or ‘index’, 1 or ‘columns’, None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
"""
warnings.warn(
(
"tshift is deprecated and will be removed in a future version. "
"Please use shift instead."
),
FutureWarning,
stacklevel=2,
)
if freq is None:
freq = "infer"
return self.shift(periods, freq, axis)
    def truncate(
        self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
    ) -> FrameOrSeries:
        """
        Truncate a Series or DataFrame before and after some index value.
        This is a useful shorthand for boolean indexing based on index
        values above or below certain thresholds.
        Parameters
        ----------
        before : date, str, int
            Truncate all rows before this index value.
        after : date, str, int
            Truncate all rows after this index value.
        axis : {0 or 'index', 1 or 'columns'}, optional
            Axis to truncate. Truncates the index (rows) by default.
        copy : bool, default is True,
            Return a copy of the truncated section.
        Returns
        -------
        type of caller
            The truncated Series or DataFrame.
        See Also
        --------
        DataFrame.loc : Select a subset of a DataFrame by label.
        DataFrame.iloc : Select a subset of a DataFrame by position.
        Notes
        -----
        If the index being truncated contains only datetime values,
        `before` and `after` may be specified as strings instead of
        Timestamps.
        Examples
        --------
        >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
        ...                    'B': ['f', 'g', 'h', 'i', 'j'],
        ...                    'C': ['k', 'l', 'm', 'n', 'o']},
        ...                   index=[1, 2, 3, 4, 5])
        >>> df
           A  B  C
        1  a  f  k
        2  b  g  l
        3  c  h  m
        4  d  i  n
        5  e  j  o
        >>> df.truncate(before=2, after=4)
           A  B  C
        2  b  g  l
        3  c  h  m
        4  d  i  n
        The columns of a DataFrame can be truncated.
        >>> df.truncate(before="A", after="B", axis="columns")
           A  B
        1  a  f
        2  b  g
        3  c  h
        4  d  i
        5  e  j
        For Series, only rows can be truncated.
        >>> df['A'].truncate(before=2, after=4)
        2    b
        3    c
        4    d
        Name: A, dtype: object
        The index values in ``truncate`` can be datetimes or string
        dates.
        >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
        >>> df = pd.DataFrame(index=dates, data={'A': 1})
        >>> df.tail()
                             A
        2016-01-31 23:59:56  1
        2016-01-31 23:59:57  1
        2016-01-31 23:59:58  1
        2016-01-31 23:59:59  1
        2016-02-01 00:00:00  1
        >>> df.truncate(before=pd.Timestamp('2016-01-05'),
        ...             after=pd.Timestamp('2016-01-10')).tail()
                             A
        2016-01-09 23:59:56  1
        2016-01-09 23:59:57  1
        2016-01-09 23:59:58  1
        2016-01-09 23:59:59  1
        2016-01-10 00:00:00  1
        Because the index is a DatetimeIndex containing only dates, we can
        specify `before` and `after` as strings. They will be coerced to
        Timestamps before truncation.
        >>> df.truncate('2016-01-05', '2016-01-10').tail()
                             A
        2016-01-09 23:59:56  1
        2016-01-09 23:59:57  1
        2016-01-09 23:59:58  1
        2016-01-09 23:59:59  1
        2016-01-10 00:00:00  1
        Note that ``truncate`` assumes a 0 value for any unspecified time
        component (midnight). This differs from partial string slicing, which
        returns any partially matching dates.
        >>> df.loc['2016-01-05':'2016-01-10', :].tail()
                             A
        2016-01-10 23:59:55  1
        2016-01-10 23:59:56  1
        2016-01-10 23:59:57  1
        2016-01-10 23:59:58  1
        2016-01-10 23:59:59  1
        """
        if axis is None:
            axis = self._stat_axis_number
        axis = self._get_axis_number(axis)
        ax = self._get_axis(axis)
        # GH 17935
        # Check that index is sorted
        if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
            raise ValueError("truncate requires a sorted index")
        # if we have a date index, convert to dates, otherwise
        # treat like a slice
        if ax._is_all_dates:
            from pandas.core.tools.datetimes import to_datetime
            before = to_datetime(before)
            after = to_datetime(after)
        if before is not None and after is not None and before > after:
            raise ValueError(f"Truncate: {after} must be after {before}")
        # for a decreasing index, swap bounds so label slicing works
        if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1:
            before, after = after, before
        slicer = [slice(None, None)] * self._AXIS_LEN
        slicer[axis] = slice(before, after)
        result = self.loc[tuple(slicer)]
        if isinstance(ax, MultiIndex):
            # label slicing on a MultiIndex keeps the full levels; replace
            # the axis with a properly truncated MultiIndex
            setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
        if copy:
            result = result.copy()
        return result
@final
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
{klass}
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self.copy(deep=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self, method="tz_convert")
@final
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
If axis ia a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backward with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self.copy(deep=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self, method="tz_localize")
# ----------------------------------------------------------------------
# Numeric Methods
@final
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
# error: Incompatible return value type (got "ndarray[Any, dtype[Any]]",
# expected "FrameOrSeries")
return np.abs(self) # type: ignore[return-value]
@final
def describe(
self: FrameOrSeries,
percentiles=None,
include=None,
exclude=None,
datetime_is_numeric=False,
) -> FrameOrSeries:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional,
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
datetime_is_numeric : bool, default False
Whether to treat datetime dtypes as numeric. This affects statistics
calculated for the column. For DataFrame input, this also
controls whether datetime columns are included by default.
.. versionadded:: 1.1.0
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe(datetime_is_numeric=True)
count 3
mean 2006-09-01 08:00:00
min 2000-01-01 00:00:00
25% 2004-12-31 12:00:00
50% 2010-01-01 00:00:00
75% 2010-01-01 00:00:00
max 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all') # doctest: +SKIP
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN a
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[object]) # doctest: +SKIP
object
count 3
unique 3
top a
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top d
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number]) # doctest: +SKIP
categorical object
count 3 3
unique 3 3
top f a
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[object]) # doctest: +SKIP
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
return describe_ndframe(
obj=self,
include=include,
exclude=exclude,
datetime_is_numeric=datetime_is_numeric,
percentiles=percentiles,
)
@final
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
"""
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns', periods=-1)
2016 2015 2014
GOOG 0.179241 0.094112 NaN
APPL -0.252395 -0.011860 NaN
"""
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
_data = self.fillna(method=fill_method, axis=axis, limit=limit)
assert _data is not None # needed for mypy
data = _data
shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
# Unsupported left operand type for / ("FrameOrSeries")
rs = data / shifted - 1 # type: ignore[operator]
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
@final
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
    @final
    def _logical_func(
        self, name: str, func, axis=0, bool_only=None, skipna=True, level=None, **kwargs
    ):
        """Shared implementation behind ``any`` and ``all``."""
        # Reject numpy-compat kwargs that don't apply here.
        nv.validate_logical_func((), kwargs, fname=name)
        if level is not None:
            # level-based aggregation is deprecated in favor of groupby
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.any(level=1) should use df.groupby(level=1).any()",
                FutureWarning,
                stacklevel=4,
            )
            if bool_only is not None:
                raise NotImplementedError(
                    "Option bool_only is not implemented with option level."
                )
            return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)

        if self.ndim > 1 and axis is None:
            # Reduce along one dimension then the other, to simplify DataFrame._reduce
            res = self._logical_func(
                name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs
            )
            return res._logical_func(name, func, skipna=skipna, **kwargs)
        return self._reduce(
            func,
            name=name,
            axis=axis,
            skipna=skipna,
            numeric_only=bool_only,
            filter_type="bool",
        )
def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
return self._logical_func(
"any", nanops.nanany, axis, bool_only, skipna, level, **kwargs
)
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
return self._logical_func(
"all", nanops.nanall, axis, bool_only, skipna, level, **kwargs
)
    @final
    def _accum_func(self, name: str, func, axis=None, skipna=True, *args, **kwargs):
        """Shared implementation behind ``cummin``/``cummax``/``cumsum``/``cumprod``."""
        skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
        if axis is None:
            axis = self._stat_axis_number
        else:
            axis = self._get_axis_number(axis)

        if axis == 1:
            # accumulate along columns by transposing, accumulating along
            # rows, and transposing back
            return self.T._accum_func(
                name, func, axis=0, skipna=skipna, *args, **kwargs
            ).T

        def block_accum_func(blk_values):
            # Blocks store values transposed relative to the frame, hence
            # the .T on the way in and out (plain ndarrays pass through).
            values = blk_values.T if hasattr(blk_values, "T") else blk_values
            result = nanops.na_accum_func(values, func, skipna=skipna)
            result = result.T if hasattr(result, "T") else result
            return result

        result = self._mgr.apply(block_accum_func)
        return self._constructor(result).__finalize__(self, method=name)
    def cummax(self, axis=None, skipna=True, *args, **kwargs):
        # Delegates to the shared accumulation machinery; axis/skipna must
        # stay positional so *args forwarding matches _accum_func's signature.
        return self._accum_func(
            "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs
        )
    def cummin(self, axis=None, skipna=True, *args, **kwargs):
        # Delegates to the shared accumulation machinery; axis/skipna must
        # stay positional so *args forwarding matches _accum_func's signature.
        return self._accum_func(
            "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs
        )
    def cumsum(self, axis=None, skipna=True, *args, **kwargs):
        # Delegates to the shared accumulation machinery; axis/skipna must
        # stay positional so *args forwarding matches _accum_func's signature.
        return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs)
    def cumprod(self, axis=None, skipna=True, *args, **kwargs):
        # Delegates to the shared accumulation machinery; axis/skipna must
        # stay positional so *args forwarding matches _accum_func's signature.
        return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs)
    @final
    def _stat_function_ddof(
        self,
        name: str,
        func,
        axis=None,
        skipna=None,
        level=None,
        ddof=1,
        numeric_only=None,
        **kwargs,
    ):
        """Shared implementation behind ``sem``/``var``/``std`` (ddof-aware reductions)."""
        nv.validate_stat_ddof_func((), kwargs, fname=name)
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # level-based aggregation is deprecated in favor of groupby
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.var(level=1) should use df.groupby(level=1).var().",
                FutureWarning,
                stacklevel=4,
            )
            return self._agg_by_level(
                name, axis=axis, level=level, skipna=skipna, ddof=ddof
            )
        return self._reduce(
            func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
        )
def sem(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
return self._stat_function_ddof(
"sem", nanops.nansem, axis, skipna, level, ddof, numeric_only, **kwargs
)
def var(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
return self._stat_function_ddof(
"var", nanops.nanvar, axis, skipna, level, ddof, numeric_only, **kwargs
)
def std(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
return self._stat_function_ddof(
"std", nanops.nanstd, axis, skipna, level, ddof, numeric_only, **kwargs
)
    @final
    def _stat_function(
        self,
        name: str,
        func,
        axis=None,
        skipna=None,
        level=None,
        numeric_only=None,
        **kwargs,
    ):
        """Shared implementation behind ``min``/``max``/``mean``/``median``/``skew``/``kurt``."""
        if name == "median":
            # median has its own numpy-compat signature to validate
            nv.validate_median((), kwargs)
        else:
            nv.validate_stat_func((), kwargs, fname=name)
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # level-based aggregation is deprecated in favor of groupby
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.median(level=1) should use df.groupby(level=1).median().",
                FutureWarning,
                stacklevel=4,
            )
            return self._agg_by_level(
                name, axis=axis, level=level, skipna=skipna, numeric_only=numeric_only
            )
        return self._reduce(
            func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
        )
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"min", nanops.nanmin, axis, skipna, level, numeric_only, **kwargs
)
def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"max", nanops.nanmax, axis, skipna, level, numeric_only, **kwargs
)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"mean", nanops.nanmean, axis, skipna, level, numeric_only, **kwargs
)
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"median", nanops.nanmedian, axis, skipna, level, numeric_only, **kwargs
)
def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"skew", nanops.nanskew, axis, skipna, level, numeric_only, **kwargs
)
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._stat_function(
"kurt", nanops.nankurt, axis, skipna, level, numeric_only, **kwargs
)
kurtosis = kurt
    @final
    def _min_count_stat_function(
        self,
        name: str,
        func,
        axis=None,
        skipna=None,
        level=None,
        numeric_only=None,
        min_count=0,
        **kwargs,
    ):
        """Shared implementation behind ``sum`` and ``prod`` (reductions with min_count)."""
        if name == "sum":
            nv.validate_sum((), kwargs)
        elif name == "prod":
            nv.validate_prod((), kwargs)
        else:
            nv.validate_stat_func((), kwargs, fname=name)
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # level-based aggregation is deprecated in favor of groupby
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.sum(level=1) should use df.groupby(level=1).sum().",
                FutureWarning,
                stacklevel=4,
            )
            return self._agg_by_level(
                name,
                axis=axis,
                level=level,
                skipna=skipna,
                min_count=min_count,
                numeric_only=numeric_only,
            )
        return self._reduce(
            func,
            name=name,
            axis=axis,
            skipna=skipna,
            numeric_only=numeric_only,
            min_count=min_count,
        )
def sum(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
return self._min_count_stat_function(
"sum", nanops.nansum, axis, skipna, level, numeric_only, min_count, **kwargs
)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
return self._min_count_stat_function(
"prod",
nanops.nanprod,
axis,
skipna,
level,
numeric_only,
min_count,
**kwargs,
)
product = prod
    def mad(self, axis=None, skipna=None, level=None):
        """
        {desc}

        Parameters
        ----------
        axis : {axis_descr}
            Axis for the function to be applied on.
        skipna : bool, default None
            Exclude NA/null values when computing the result.
        level : int or level name, default None
            If the axis is a MultiIndex (hierarchical), count along a
            particular level, collapsing into a {name1}.

        Returns
        -------
        {name1} or {name2} (if level specified)\
        {see_also}\
        {examples}
        """
        # NOTE: this docstring is a template; _add_numeric_operations formats
        # it via @doc with per-class values, so the placeholders must remain.
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # level-based aggregation is deprecated in favor of groupby
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. df.mad(level=1) should use df.groupby(level=1).mad()",
                FutureWarning,
                stacklevel=3,
            )
            return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)

        data = self._get_numeric_data()
        if axis == 0:
            # deviation of each value from its column mean
            demeaned = data - data.mean(axis=0)
        else:
            # deviation of each value from its row mean, broadcast along rows
            demeaned = data.sub(data.mean(axis=1), axis=0)
        return np.abs(demeaned).mean(axis=axis, skipna=skipna)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name1, name2 = _doc_params(cls)
@doc(
_bool_doc,
desc=_any_desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
return NDFrame.any(self, axis, bool_only, skipna, level, **kwargs)
setattr(cls, "any", any)
@doc(
_bool_doc,
desc=_all_desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
return NDFrame.all(self, axis, bool_only, skipna, level, **kwargs)
setattr(cls, "all", all)
# error: Argument 1 to "doc" has incompatible type "Optional[str]"; expected
# "Union[str, Callable[..., Any]]"
@doc(
NDFrame.mad.__doc__, # type: ignore[arg-type]
desc="Return the mean absolute deviation of the values "
"over the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also="",
examples="",
)
def mad(self, axis=None, skipna=None, level=None):
return NDFrame.mad(self, axis, skipna, level)
setattr(cls, "mad", mad)
@doc(
_num_ddof_doc,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
name1=name1,
name2=name2,
axis_descr=axis_descr,
)
def sem(
self,
axis=None,
skipna=None,
level=None,
ddof=1,
numeric_only=None,
**kwargs,
):
return NDFrame.sem(self, axis, skipna, level, ddof, numeric_only, **kwargs)
setattr(cls, "sem", sem)
@doc(
_num_ddof_doc,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
name1=name1,
name2=name2,
axis_descr=axis_descr,
)
def var(
self,
axis=None,
skipna=None,
level=None,
ddof=1,
numeric_only=None,
**kwargs,
):
return NDFrame.var(self, axis, skipna, level, ddof, numeric_only, **kwargs)
setattr(cls, "var", var)
@doc(
_num_ddof_doc,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
name1=name1,
name2=name2,
axis_descr=axis_descr,
)
def std(
self,
axis=None,
skipna=None,
level=None,
ddof=1,
numeric_only=None,
**kwargs,
):
return NDFrame.std(self, axis, skipna, level, ddof, numeric_only, **kwargs)
setattr(cls, "std", std)
@doc(
_cnum_doc,
desc="minimum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name="min",
examples=_cummin_examples,
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
return NDFrame.cummin(self, axis, skipna, *args, **kwargs)
setattr(cls, "cummin", cummin)
@doc(
_cnum_doc,
desc="maximum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name="max",
examples=_cummax_examples,
)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
return NDFrame.cummax(self, axis, skipna, *args, **kwargs)
setattr(cls, "cummax", cummax)
@doc(
_cnum_doc,
desc="sum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name="sum",
examples=_cumsum_examples,
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
return NDFrame.cumsum(self, axis, skipna, *args, **kwargs)
setattr(cls, "cumsum", cumsum)
@doc(
_cnum_doc,
desc="product",
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name="prod",
examples=_cumprod_examples,
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
return NDFrame.cumprod(self, axis, skipna, *args, **kwargs)
setattr(cls, "cumprod", cumprod)
@doc(
_num_doc,
desc="Return the sum of the values over the requested axis.\n\n"
"This is equivalent to the method ``numpy.sum``.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
def sum(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
return NDFrame.sum(
self, axis, skipna, level, numeric_only, min_count, **kwargs
)
setattr(cls, "sum", sum)
@doc(
_num_doc,
desc="Return the product of the values over the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=_stat_func_see_also,
examples=_prod_examples,
)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
return NDFrame.prod(
self, axis, skipna, level, numeric_only, min_count, **kwargs
)
setattr(cls, "prod", prod)
cls.product = prod
@doc(
_num_doc,
desc="Return the mean of the values over the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return NDFrame.mean(self, axis, skipna, level, numeric_only, **kwargs)
setattr(cls, "mean", mean)
@doc(
_num_doc,
desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return NDFrame.skew(self, axis, skipna, level, numeric_only, **kwargs)
setattr(cls, "skew", skew)
@doc(
_num_doc,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return NDFrame.kurt(self, axis, skipna, level, numeric_only, **kwargs)
setattr(cls, "kurt", kurt)
cls.kurtosis = kurt
    # ``median`` wrapper: docstring is rendered by @doc from _num_doc.
    @doc(
        _num_doc,
        desc="Return the median of the values over the requested axis.",
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        min_count="",
        see_also="",
        examples="",
    )
    def median(
        self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
    ):
        # Delegate positionally to the base-class implementation.
        return NDFrame.median(self, axis, skipna, level, numeric_only, **kwargs)
    setattr(cls, "median", median)
    # ``max`` wrapper: docstring is rendered by @doc from _num_doc,
    # including the shared see-also block and max-specific examples.
    @doc(
        _num_doc,
        desc="Return the maximum of the values over the requested axis.\n\n"
        "If you want the *index* of the maximum, use ``idxmax``. This is "
        "the equivalent of the ``numpy.ndarray`` method ``argmax``.",
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        min_count="",
        see_also=_stat_func_see_also,
        examples=_max_examples,
    )
    def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
        # Delegate positionally to the base-class implementation.
        return NDFrame.max(self, axis, skipna, level, numeric_only, **kwargs)
    setattr(cls, "max", max)
    # ``min`` wrapper: docstring is rendered by @doc from _num_doc,
    # including the shared see-also block and min-specific examples.
    @doc(
        _num_doc,
        desc="Return the minimum of the values over the requested axis.\n\n"
        "If you want the *index* of the minimum, use ``idxmin``. This is "
        "the equivalent of the ``numpy.ndarray`` method ``argmin``.",
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        min_count="",
        see_also=_stat_func_see_also,
        examples=_min_examples,
    )
    def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
        # Delegate positionally to the base-class implementation.
        return NDFrame.min(self, axis, skipna, level, numeric_only, **kwargs)
    setattr(cls, "min", min)
@final
@doc(Rolling)
def rolling(
self,
window: int | timedelta | BaseOffset | BaseIndexer,
min_periods: int | None = None,
center: bool_t = False,
win_type: str | None = None,
on: str | None = None,
axis: Axis = 0,
closed: str | None = None,
method: str = "single",
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
method=method,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
method=method,
)
@final
@doc(Expanding)
def expanding(
self,
min_periods: int = 1,
center: bool_t | None = None,
axis: Axis = 0,
method: str = "single",
) -> Expanding:
axis = self._get_axis_number(axis)
if center is not None:
warnings.warn(
"The `center` argument on `expanding` will be removed in the future",
FutureWarning,
stacklevel=2,
)
else:
center = False
return Expanding(
self, min_periods=min_periods, center=center, axis=axis, method=method
)
    @final
    @doc(ExponentialMovingWindow)
    def ewm(
        self,
        com: float | None = None,
        span: float | None = None,
        halflife: float | TimedeltaConvertibleTypes | None = None,
        alpha: float | None = None,
        min_periods: int | None = 0,
        adjust: bool_t = True,
        ignore_na: bool_t = False,
        axis: Axis = 0,
        times: str | np.ndarray | FrameOrSeries | None = None,
        method: str = "single",
    ) -> ExponentialMovingWindow:
        # Docstring is supplied by @doc(ExponentialMovingWindow).
        # Resolve axis aliases (e.g. "index"/"columns") to their integer form
        # before handing everything to the window object.
        axis = self._get_axis_number(axis)
        # error: Value of type variable "FrameOrSeries" of "ExponentialMovingWindow"
        # cannot be "object"
        return ExponentialMovingWindow( # type: ignore[type-var]
            self,
            com=com,
            span=span,
            halflife=halflife,
            alpha=alpha,
            min_periods=min_periods,
            adjust=adjust,
            ignore_na=ignore_na,
            axis=axis,
            times=times,
            method=method,
        )
# ----------------------------------------------------------------------
# Arithmetic Methods
    @final
    def _inplace_method(self, other, op):
        """
        Wrap arithmetic method to operate inplace.
        Parameters
        ----------
        other : object
            Right-hand operand forwarded to ``op``.
        op : callable
            The out-of-place operator to apply, e.g. ``type(self).__add__``.
        Returns
        -------
        self, updated in place with the result of ``op(self, other)``.
        """
        result = op(self, other)
        if (
            self.ndim == 1
            and result._indexed_same(self)
            and is_dtype_equal(result.dtype, self.dtype)
        ):
            # GH#36498 this inplace op can _actually_ be inplace.
            self._values[:] = result._values
            return self
        # Delete cacher
        self._reset_cacher()
        # this makes sure that we are aligned like the input
        # we are updating inplace so we want to ignore is_copy
        self._update_inplace(
            result.reindex_like(self, copy=False), verify_is_copy=False
        )
        return self
    # In-place operator protocol: each dunder funnels through _inplace_method
    # with the matching out-of-place operator of the concrete subclass.
    # The "type: ignore[operator]" comments silence mypy, which cannot see
    # the arithmetic operators added to NDFrame subclasses dynamically.
    def __iadd__(self, other):
        # error: Unsupported left operand type for + ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__add__) # type: ignore[operator]
    def __isub__(self, other):
        # error: Unsupported left operand type for - ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__sub__) # type: ignore[operator]
    def __imul__(self, other):
        # error: Unsupported left operand type for * ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__mul__) # type: ignore[operator]
    def __itruediv__(self, other):
        # error: Unsupported left operand type for / ("Type[NDFrame]")
        return self._inplace_method(
            other, type(self).__truediv__ # type: ignore[operator]
        )
    def __ifloordiv__(self, other):
        # error: Unsupported left operand type for // ("Type[NDFrame]")
        return self._inplace_method(
            other, type(self).__floordiv__ # type: ignore[operator]
        )
    def __imod__(self, other):
        # error: Unsupported left operand type for % ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__mod__) # type: ignore[operator]
    def __ipow__(self, other):
        # error: Unsupported left operand type for ** ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__pow__) # type: ignore[operator]
    def __iand__(self, other):
        # error: Unsupported left operand type for & ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__and__) # type: ignore[operator]
    def __ior__(self, other):
        # error: Unsupported left operand type for | ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__or__) # type: ignore[operator]
    def __ixor__(self, other):
        # error: Unsupported left operand type for ^ ("Type[NDFrame]")
        return self._inplace_method(other, type(self).__xor__) # type: ignore[operator]
# ----------------------------------------------------------------------
# Misc methods
@final
def _find_valid_index(self, *, how: str) -> Hashable | None:
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how=how)
if idxpos is None:
return None
return self.index[idxpos]
    # The docstring below is a template: @doc substitutes {position}/{klass},
    # and last_valid_index reuses the same text with position="last".
    @final
    @doc(position="first", klass=_shared_doc_kwargs["klass"])
    def first_valid_index(self) -> Hashable | None:
        """
        Return index for {position} non-NA value or None, if no NA value is found.
        Returns
        -------
        scalar : type of index
        Notes
        -----
        If all elements are non-NA/null, returns None.
        Also returns None for empty {klass}.
        """
        return self._find_valid_index(how="first")
    @final
    @doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"])
    def last_valid_index(self) -> Hashable | None:
        # Docstring is inherited from first_valid_index via @doc with
        # position="last" substituted; the body mirrors it with how="last".
        return self._find_valid_index(how="last")
def _doc_params(cls):
    """Return (axis_descr, name1, name2) substitutions for the doc templates."""
    axis_parts = [f"{axis} ({num})" for num, axis in enumerate(cls._AXIS_ORDERS)]
    axis_descr = "{" + ", ".join(axis_parts) + "}"
    # Multi-axis objects slice down to their _constructor_sliced type;
    # one-dimensional objects reduce to a plain scalar.
    if cls._AXIS_LEN > 1:
        name = cls._constructor_sliced.__name__
    else:
        name = "scalar"
    return axis_descr, name, cls.__name__
_num_doc = """
{desc}
Parameters
----------
axis : {axis_descr}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
{min_count}\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
{name1} or {name2} (if level specified)\
{see_also}\
{examples}
"""
_num_ddof_doc = """
{desc}
Parameters
----------
axis : {axis_descr}
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
{name1} or {name2} (if level specified)
Notes
-----
To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the
default `ddof=1`)\n"""
_bool_doc = """
{desc}
Parameters
----------
axis : {{0 or 'index', 1 or 'columns', None}}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be {empty_value}, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
{name1} or {name2}
    If level is specified, then {name2} is returned; otherwise, {name1}
is returned.
{see_also}
{examples}"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a Dataframe axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([], dtype="float64").all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative {desc} over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
{desc}.
Parameters
----------
axis : {{0 or 'index', 1 or 'columns'}}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
{name1} or {name2}
Return cumulative {desc} of {name1} or {name2}.
See Also
--------
core.window.Expanding.{accum_func_name} : Similar functionality
but ignores ``NaN`` values.
{name2}.{accum_func_name} : Return the {desc} over
{name2} axis.
{name2}.cummax : Return cumulative maximum over {name2} axis.
{name2}.cummin : Return cumulative minimum over {name2} axis.
{name2}.cumsum : Return cumulative sum over {name2} axis.
{name2}.cumprod : Return cumulative product over {name2} axis.
{examples}"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a Dataframe axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([], dtype="float64").any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([], dtype="float64").sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([], dtype="float64").sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([], dtype="float64").prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([], dtype="float64").prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
"""
def _align_as_utc(
left: FrameOrSeries, right: FrameOrSeries, join_index: Index | None
) -> tuple[FrameOrSeries, FrameOrSeries]:
"""
If we are aligning timezone-aware DatetimeIndexes and the timezones
do not match, convert both to UTC.
"""
if is_datetime64tz_dtype(left.index.dtype):
if left.index.tz != right.index.tz:
if join_index is not None:
# GH#33671 ensure we don't change the index on
# our original Series (NB: by default deep=False)
left = left.copy()
right = right.copy()
left.index = join_index
right.index = join_index
return left, right
| gfyoung/pandas | pandas/core/generic.py | Python | bsd-3-clause | 387,628 | 0.000689 |
'''OpenGL extension ARB.robustness_application_isolation
This module customises the behaviour of the
OpenGL.raw.WGL.ARB.robustness_application_isolation to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/robustness_application_isolation.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.WGL import _types, _glgets
from OpenGL.raw.WGL.ARB.robustness_application_isolation import *
from OpenGL.raw.WGL.ARB.robustness_application_isolation import _EXTENSION_NAME
def glInitRobustnessApplicationIsolationARB():
    '''Return boolean indicating whether this extension is available'''
    # Autogenerated boilerplate: availability is checked through
    # OpenGL.extensions.hasGLExtension using this module's extension name.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/WGL/ARB/robustness_application_isolation.py | Python | lgpl-3.0 | 867 | 0.008074 |
"""
Database ORM models managed by this Django app
Please do not integrate directly with these models!!! This app currently
offers one programmatic API -- api.py for direct Python integration.
"""
import re
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import gettext_lazy as _
from model_utils.models import TimeStampedModel
from simple_history.models import HistoricalRecords
class Organization(TimeStampedModel):
    """
    An Organization is a representation of an entity which publishes/provides
    one or more courses delivered by the LMS. Organizations have a base set of
    metadata describing the organization, including id, name, and description.
    """
    # Human-readable display name (indexed for lookups).
    name = models.CharField(max_length=255, db_index=True)
    # Unique machine-friendly identifier; character set enforced in clean().
    short_name = models.CharField(
        max_length=255,
        unique=True,
        verbose_name='Short Name',
        help_text=_(
            'Unique, short string identifier for organization. '
            'Please do not use spaces or special characters. '
            'Only allowed special characters are period (.), hyphen (-) and underscore (_).'
        ),
    )
    description = models.TextField(null=True, blank=True)
    # Logo used on certificates; the help text asks for PNG, but the field
    # itself does not validate the image format.
    logo = models.ImageField(
        upload_to='organization_logos',
        help_text=_('Please add only .PNG files for logo images. This logo will be used on certificates.'),
        null=True, blank=True, max_length=255
    )
    # Flag for deactivating an organization without deleting its row.
    active = models.BooleanField(default=True)
    # django-simple-history audit trail of row changes.
    history = HistoricalRecords()
    def __str__(self):
        """Display as 'Name (short_name)'."""
        return f"{self.name} ({self.short_name})"
    def clean(self):
        """Reject short names outside [a-zA-Z0-9._-]."""
        # NOTE(review): the regex uses '*', so an empty short_name passes this
        # check -- confirm whether blank values are rejected elsewhere.
        if not re.match("^[a-zA-Z0-9._-]*$", self.short_name):
            raise ValidationError(_('Please do not use spaces or special characters in the short name '
                                    'field. Only allowed special characters are period (.), hyphen (-) '
                                    'and underscore (_).'))
class OrganizationCourse(TimeStampedModel):
    """
    An OrganizationCourse represents the link between an Organization and a
    Course (via course key). Because Courses are not true Open edX entities
    (in the Django/ORM sense) the modeling and integrity is limited to that
    of specifying course identifier strings in this model.
    """
    # Plain string course key -- intentionally NOT a foreign key (see above).
    course_id = models.CharField(max_length=255, db_index=True, verbose_name='Course ID')
    organization = models.ForeignKey(Organization, db_index=True, on_delete=models.CASCADE)
    # Flag for deactivating a link without deleting the row.
    active = models.BooleanField(default=True)
    # django-simple-history audit trail of row changes.
    history = HistoricalRecords()
    class Meta:
        """ Meta class for this Django model """
        # A course may be linked to an organization at most once.
        unique_together = (('course_id', 'organization'),)
        verbose_name = _('Link Course')
        verbose_name_plural = _('Link Courses')
| edx/edx-organizations | organizations/models.py | Python | agpl-3.0 | 2,793 | 0.002148 |
import keras | jadsonjs/DataScience | DeepLearning/keras/hello_world.py | Python | apache-2.0 | 12 | 0.083333 |
# Copyright 2014 Intel Corp.
#
# Author: Zhai Edwin <edwin.zhai@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/ipmi/manager.py
"""
from ceilometer.ipmi import manager
from ceilometer.tests import agentbase
import mock
from oslotest import base
class TestManager(base.BaseTestCase):
    """Smoke test for the IPMI agent manager's pollster discovery."""
    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_load_plugins(self):
        # Pipeline setup is mocked out so only pollster loading is exercised.
        mgr = manager.AgentManager()
        self.assertIsNotNone(list(mgr.pollster_manager))
class TestRunTasks(agentbase.BaseAgentManagerTestCase):
    """Run the shared agent-manager test suite against the IPMI manager."""
    @staticmethod
    def create_manager():
        # Factory hook consumed by the shared base class to build the
        # manager under test.
        return manager.AgentManager()
    def setUp(self):
        # NOTE(review): flag read by the shared agentbase fixture --
        # presumably enables per-source resources; verify in agentbase.
        self.source_resources = True
        super(TestRunTasks, self).setUp()
| froyobin/ceilometer | ceilometer/tests/ipmi/test_manager.py | Python | apache-2.0 | 1,263 | 0 |
#!/usr/bin/env python3
"""
Copyright 2017 Jocelyn Falempe kdj0c@djinvi.net
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from onepagepoints import *
import yaml
import os
import copy
import argparse
import pathlib
from string import ascii_uppercase
from collections import OrderedDict
# return pretty string for points
def points(n):
    """Format a points cost: 'Free' for 0, '1 pt' for 1, else 'N pts'."""
    if n == 0:
        return "Free"
    suffix = 'pt' if n == 1 else 'pts'
    return '{} {}'.format(n, suffix)
# return pretty string for duplicates weapons
def pCount(n):
    """Prefix 'Nx ' for two or more copies of a weapon; '' otherwise."""
    return '' if n < 2 else '{}x '.format(n)
# Return unit name and count if more than one
def prettyName(unit):
    """Unit name, suffixed with ' [N]' when the unit bundles N > 1 models."""
    if unit.count <= 1:
        return unit.name
    return unit.name + ' [{0}]'.format(unit.count)
# Return a pretty string of the list of equipments
def PrettyEquipments(equipments):
    """One 'Nx name profile' string per distinct equipment, in first-seen order."""
    # OrderedDict.fromkeys deduplicates the (item, count) pairs while
    # preserving the order in which each equipment first appears.
    unique_pairs = OrderedDict.fromkeys((equ, equipments.count(equ)) for equ in equipments)
    return [pCount(count) + item.name + ' ' + item.Profile() for item, count in unique_pairs]
class Upgrade:
    """One upgrade option set: swap some equipment for one of several choices.
    Built from a YAML 'upgrades' batch; the per-option point costs are filled
    in later by Cost().
    """
    def __init__(self, batch, faction):
        armory = faction.armory
        self.getFactionCost = faction.getFactionCost
        # When 'all' is set the upgrade applies to every model in the unit;
        # otherwise costing is done for a single model (see Cost_unit).
        self.all = batch.get('all', False)
        self.text = batch['text']
        # Equipment adjusted before costing, plus the actual swap lists.
        self.preremove = armory.get(batch.get('pre-remove', {}))
        self.preadd = armory.get(batch.get('pre-add', {}))
        self.remove = armory.get(batch.get('remove', {}))
        self.add = [armory.get(up_add) for up_add in batch['add']]
        self.rawcost = []
    # Calculate the cost of an upgrade on a unit
    # If the upgrade is only for one model, set the unit count to 1
    # remove equipment, add new equipment and calculate the new cost.
    def Cost_unit(self, unit):
        """Return the point delta of each 'add' option when applied to *unit*."""
        base_unit = copy.copy(unit)
        if not self.all:
            base_unit.SetCount(1)
        base_unit.RemoveEquipments(self.preremove)
        base_unit.AddEquipments(self.preadd)
        base_unit.SetFactionCost(self.getFactionCost(base_unit))
        prev_cost = base_unit.cost
        base_unit.RemoveEquipments(self.remove)
        costs = []
        for upgrade in self.add:
            new_unit = copy.copy(base_unit)
            new_unit.AddEquipments(upgrade)
            new_unit.SetFactionCost(self.getFactionCost(new_unit))
            up_cost = new_unit.cost - prev_cost
            costs.append(up_cost)
        # print('Cost for unit {}: {}'.format(unit.name, costs))
        return costs
    # an upgrade group cost is calculated for all units who have access to this
    # upgrade group, so calculate the mean
    def Cost(self, units):
        """Average Cost_unit over *units*; store and return the rounded costs."""
        u_count = len(units)
        cost = [0] * len(self.add)
        for unit in units:
            cost = [x + y for x, y in zip(cost, self.Cost_unit(unit))]
        self.cost = [int(round(c / u_count)) for c in cost]
        # print('Cost for all units: {}'.format(self.cost))
        return self.cost
class UpgradeGroup(list):
    """A list of Upgrades plus the unit names the group applies to."""
    def __init__(self, ydata, faction):
        if 'units' not in ydata:
            # NOTE(review): this early return leaves the instance as an empty
            # list with no .units/.name attributes; callers that later touch
            # .units will raise AttributeError. Consider raising instead.
            print('Upgrade group Error, should have a "units" section {}'.format(ydata))
            return
        self.units = ydata['units']
        super().__init__([Upgrade(upgrade, faction) for upgrade in ydata['upgrades']])
        # Display letter (A, B, ...) assigned later by Faction._parse_yaml.
        self.name = ''
class Faction():
    """Load a faction's YAML files and build its pages of units/upgrades."""
    def __init__(self, name):
        # NOTE(review): the module-level ``armory`` global is rebound to the
        # newest Faction's armory, so only one Faction is safe at a time.
        global armory
        self.name = name
        self.armory = Armory()
        armory = self.armory
        self.pages = []
        self._parse_yaml()
    def _read_yaml(self, filename, path):
        """Read and parse a YAML file located at path/filename."""
        fname = os.path.join(path, filename)
        with open(fname, "r") as f:
            print(' Processing {}'.format(fname))
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # and unsafe on untrusted input; prefer yaml.safe_load.
            return yaml.load(f.read())
    def _parse_yaml(self):
        """Populate armory, units, upgrade groups and pages from YAML."""
        yfaction = self._read_yaml('faction.yml', self.name)
        self.title = yfaction['title']
        # Shared equipment is loaded first, then faction-specific gear.
        if os.path.exists(os.path.join('Common', 'equipments.yml')):
            yequipments = self._read_yaml('equipments.yml', 'Common')
            self.armory.add([Weapon(name, **w) for name, w in yequipments['weapons'].items()])
        yequipments = self._read_yaml("equipments.yml", self.name)
        self.armory.add([Weapon(name, **w) for name, w in yequipments['weapons'].items()])
        self.armory.add([WarGear.from_dict(name, wargear, self.armory) for name, wargear in yequipments['wargear'].items()])
        self.factionRules = yequipments['factionRules']
        # NOTE(review): allFiles is never used below.
        allFiles = os.listdir(self.name)
        yunits = self._read_yaml('units.yml', self.name)
        yupgrades = self._read_yaml('upgrades.yml', self.name)
        units = [Unit.from_dict(yunit, self.armory) for yunit in yunits]
        upgrades = [UpgradeGroup(up_group, self) for up_group in yupgrades]
        for unit in units:
            unit.SetFactionCost(self.getFactionCost(unit))
        # Wire each upgrade group to its units and compute averaged costs.
        for g, group in enumerate(upgrades):
            affected_units = [unit for unit in units if unit.name in group.units]
            if len(affected_units) < len(group.units):
                print('Error units in ugrade group not found {}'.format(group.units))
                return
            for unit in affected_units:
                unit.upgrades.append(group)
            for upgrade in group:
                upgrade.Cost(affected_units)
        pages = yfaction.get('pages')
        if len(pages) == 1:
            spRules = yfaction.get('specialRules', None)
            psychics = yfaction.get('psychics', None)
        for p, page in enumerate(pages):
            # TODO order should come from pages, not from units
            punits = [unit for unit in units if unit.name in page]
            pugrades = [group for group in upgrades if set(group.units) & set(page)]
            for g, group in enumerate(pugrades):
                group.name = ascii_uppercase[g]
            # NOTE(review): these two assignments unconditionally overwrite
            # spRules/psychics with the numbered keys ('specialRules1', ...),
            # clobbering the un-numbered values fetched above for single-page
            # factions -- confirm whether single-page YAMLs use numbered keys.
            spRules = yfaction.get('specialRules' + str(p + 1), None)
            psychics = yfaction.get('psychics' + str(p + 1), None)
            self.pages.append((punits, pugrades, spRules, psychics))
    # Get hardcoded cost for per-faction special rules.
    def getFactionCost(self, unit):
        """Sum the hardcoded faction-rule costs present on *unit*."""
        return sum([self.factionRules[r] for r in unit.specialRules + unit.wargearSp if r in self.factionRules])
class DumpTxt:
    """Accumulate faction data and render it as plain text paragraphs."""
    def __init__(self):
        # One entry per logical paragraph; get() joins them with blank lines.
        self.data = []
    def _addUnit(self, unit):
        """Render one unit: 'name quality defense+' plus non-empty detail lines."""
        fields = ['{0} {1} {2}+'.format(prettyName(unit), str(unit.quality), str(unit.basedefense))]
        fields += [', '.join(PrettyEquipments(unit.equipments))]
        fields += [", ".join(unit.specialRules)]
        fields += [", ".join([group.name for group in unit.upgrades])]
        fields += [points(unit.cost)]
        # Empty sections are dropped rather than printed as blank lines.
        return '\n'.join([f for f in fields if f])
    def addUnits(self, units):
        """Append one paragraph per unit."""
        self.data += [self._addUnit(unit) for unit in units]
    def _getUpLine(self, equ, cost):
        """Render one upgrade option: equipment list plus cost."""
        return ', '.join(PrettyEquipments(equ)) + ' ' + points(cost)
    def _getUpGroup(self, group, upgrades):
        """Render an upgrade group; only its first line carries the 'A | ' prefix."""
        preamble = group + ' | '
        lines = []
        for up in upgrades:
            lines += [preamble + up.text + ':']
            lines += [self._getUpLine(addEqu, up.cost[i]) for i, addEqu in enumerate(up.add)]
            preamble = ''
        return '\n'.join(lines)
    def addUpgrades(self, upgrades):
        """Append one paragraph per upgrade group."""
        self.data += [self._getUpGroup(group.name, group) for group in upgrades]
    def addPsychics(self, psychics):
        """Append the psychic spell list; no-op when the faction has none."""
        if not psychics:
            return
        lines = [name + '(' + str(power) + '+): ' + desc for power, spell in psychics.items() for name, desc in spell.items()]
        self.data.append('\n'.join(lines))
    def get(self, faction):
        """Render every page of *faction* and return the joined text."""
        for units, upgrades, specialRules, psychics in faction.pages:
            self.addUnits(units)
            self.addUpgrades(upgrades)
            # Faction stores None when a page has no special rules; guard it
            # (mirroring addPsychics) instead of crashing on .items().
            if specialRules:
                self.data.append('\n'.join([k + ': ' + v for k, v in specialRules.items()]))
            self.addPsychics(psychics)
        return '\n\n'.join(self.data)
class DumpTex:
    """Render a faction as a LaTeX document (preamble from Template/header.tex)."""
    def __init__(self):
        with open('Template/header.tex') as f:
            self.header = f.read()
    # Latex uses ~ to prevent line break
    def no_line_break(self, s):
        """Replace spaces with LaTeX non-breaking ties (~)."""
        return s.replace(' ', '~')
    def prettyProfile(self, equipment):
        """Return an equipment profile; weapon profiles are kept unbreakable."""
        if isinstance(equipment, Weapon):
            return self.no_line_break(equipment.Profile())
        return equipment.Profile()
    # Return a pretty string for latex of the list of equipments
    def PrettyEquipments(self, equipments):
        """Collapse duplicate equipments into 'count name profile' strings."""
        equWithCount = list(OrderedDict.fromkeys([(equ, equipments.count(equ)) for equ in equipments]))
        return [pCount(c) + self.no_line_break(e.name) + ' ' + self.prettyProfile(e) for e, c in equWithCount]
    def _addUnit(self, unit):
        """Return one '&'-separated LaTeX table row describing a unit."""
        cost = unit.cost
        # '\\mbox' (was '\mbox'): same string value, but avoids Python's
        # invalid-escape-sequence warning for the unrecognized escape '\m'.
        equ = ", ".join(['\\mbox{' + e + '}' for e in self.PrettyEquipments(unit.equipments)])
        sp = ", ".join(unit.specialRules)
        up = ", ".join([group.name for group in unit.upgrades])
        return ' & '.join([prettyName(unit), str(unit.quality), str(unit.basedefense) + '+', equ, sp, up, points(cost)])
    def addUnits(self, units):
        """Emit the \\UnitTable block for a page's units."""
        self.data.append('\\UnitTable{')
        self.data.append('\\\\\n'.join([self._addUnit(unit) for unit in units]) + '}')
    def _getUpLine(self, equ, cost):
        """Format one upgrade option as a two-column LaTeX row."""
        return ', '.join(self.PrettyEquipments(equ)) + ' & ' + points(cost)
    def _getUpGroup(self, group, upgrades):
        """Emit one \\UpgradeTable block; the group label is shown once."""
        self.data.append('\\UpgradeTable{')
        data = []
        preamble = group + ' | '
        for up in upgrades:
            data += ['\\multicolumn{2}{p{\\dimexpr \\linewidth - 2pt \\relax}}{\\bf ' + preamble + up.text + ': }']
            data += [self._getUpLine(addEqu, up.cost[i]) for i, addEqu in enumerate(up.add)]
            preamble = ''
        self.data.append('\\\\\n'.join(data) + '}')
    def addUpgrades(self, upgrades):
        """Emit one upgrade table per group."""
        for group in upgrades:
            self._getUpGroup(group.name, group)
    def addSpecialRules(self, sp):
        """Emit \\sprule entries; skipped when the page has none."""
        if not sp:
            return
        self.data.append('\\specialrules')
        self.data += ['\\sprule{' + k + '}{' + v + '}' for k, v in sp.items()]
    def addPsychics(self, psychics):
        """Emit the psychic block; skipped when the page has none."""
        if not psychics:
            return
        self.data.append('\\startpsychic{')
        for quality, spells in psychics.items():
            self.data += ['\\psychic{' + k + '}{' + str(quality) + '+}{' + v + '}' for k, v in spells.items()]
        self.data.append('}')
    def get(self, faction):
        """Render the whole faction and return the LaTeX source."""
        self.data = ['\\mytitle{' + faction.title + '}']
        self.data.append('\\begin{document}')
        for units, upgrades, specialRules, psychics in faction.pages:
            self.addUnits(units)
            self.data.append('\\begin{multicols*}{3}[]')
            self.addUpgrades(upgrades)
            self.addSpecialRules(specialRules)
            self.addPsychics(psychics)
            self.data.append('\\end{multicols*}')
            self.data.append('\\pagebreak')
        self.data.append('\\end{document}')
        return self.header + '\n'.join(self.data)
class HtmlTag:
    """Minimal HTML element renderer.

    `content` may be a plain string (leaf element, rendered on one line),
    a single HtmlTag, or a list mixing strings and HtmlTags.  Nested tags
    are indented by one extra character per nesting level.
    """

    def __init__(self, tag, content, tagparm=''):
        self.tag = tag
        self.content = content
        self.set_indent(0)
        # Plain-string content renders inline: <tag>text</tag>
        self.leaf = isinstance(content, str)
        # Stored with a leading space so it can be pasted right after the tag name.
        self.tagparm = ' ' + tagparm if tagparm else ''

    def __str__(self):
        pad = ' ' * self.indent
        if isinstance(self.content, list):
            rendered = []
            for child in self.content:
                rendered.append(pad + child if isinstance(child, str) else str(child))
            body = '\n'.join(rendered)
        else:
            # A single string or HtmlTag; nested tags indent themselves.
            body = self.content
        if self.leaf:
            return '{pad}<{t}{p}>{b}</{t}>'.format(t=self.tag, p=self.tagparm, b=body, pad=pad)
        return '{pad}<{t}{p}>\n{b}\n{pad}</{t}>'.format(t=self.tag, p=self.tagparm, b=body, pad=pad)

    def set_indent(self, level):
        """Set this tag's indent level and propagate level + 1 to children."""
        self.indent = level
        children = self.content if isinstance(self.content, list) else [self.content]
        for child in children:
            if isinstance(child, HtmlTag):
                child.set_indent(level + 1)
class DumpHtml:
    """Render a faction as a standalone HTML page (chrome from Template/)."""
    def __init__(self):
        # Static page header/footer; the generated body is spliced between them.
        with open('Template/header.html') as f:
            self.header = f.read()
        with open('Template/footer.html') as f:
            self.footer = f.read()
    def no_line_break(self, s):
        # The replacement character is a non-breaking space (U+00A0), so the
        # text never wraps mid-phrase in the browser.
        return s.replace(' ', ' ')
    def points(self, n):
        """Format a point cost with non-breaking spaces."""
        return self.no_line_break(points(n))
    def _addUnit(self, unit):
        """Build the list of <td> cells for one unit row."""
        cells = [prettyName(unit), str(unit.quality), str(unit.basedefense) + '+',
                 ',<br> '.join(PrettyEquipments(unit.equipments)),
                 ", ".join(unit.specialRules),
                 ", ".join([group.name for group in unit.upgrades]),
                 self.points(unit.cost)]
        return [HtmlTag('td', cell) for cell in cells]
    def addUnits(self, units):
        """Return the unit <table>, one row per unit plus a header row."""
        table_header = ['Name [size]', 'Qua', 'Def', 'Equipment', 'Special Rules', 'Upg', 'Cost']
        rows = [HtmlTag('tr', [HtmlTag('th', title) for title in table_header])]
        rows.extend([HtmlTag('tr', self._addUnit(unit)) for unit in units])
        return HtmlTag('table', rows, 'class=unit')
    def _getUpLine(self, equ, cost):
        """Build the two <td> cells (equipment, cost) for one upgrade option."""
        cells = [',<br>'.join(PrettyEquipments(equ)), self.points(cost)]
        return [HtmlTag('td', cell) for cell in cells]
    def _getUpGroup(self, group, upgrades):
        """Return the <table> for one upgrade group; the label is shown once."""
        preamble = group + ' | '
        rows = []
        for up in upgrades:
            rows.append(HtmlTag('tr', [HtmlTag('th', preamble + up.text + ':'), HtmlTag('th', '')]))
            rows.extend(HtmlTag('tr', self._getUpLine(addEqu, up.cost[i])) for i, addEqu in enumerate(up.add))
            preamble = ''
        return HtmlTag('table', rows, 'class=ut1')
    def addUpgrades(self, upgrades):
        """Return one <li>-wrapped upgrade table per group."""
        return [HtmlTag('li', self._getUpGroup(group.name, group)) for group in upgrades]
    def addSpecialRules(self, specialRules):
        """Return the special-rules section, or [] when the page has none."""
        if not specialRules:
            return []
        lines = [HtmlTag('h3', 'Special Rules')]
        lines.extend([HtmlTag('li', [HtmlTag('b', name + ': '), desc]) for name, desc in specialRules.items()])
        return lines
    def _getSpell(self, name, power, desc):
        """Build one psychic-spell table row: 'Name (power+): description'."""
        cell = [HtmlTag('b', name + ' (' + str(power) + '+): '), desc]
        return HtmlTag('tr', HtmlTag('td', cell))
    def addPsychics(self, psychics):
        """Return the psychic-spells section, or [] when the page has none."""
        if not psychics:
            return []
        lines = [HtmlTag('h3', 'Psychic Spells')]
        rows = [self._getSpell(name, power, desc) for power, spell in psychics.items() for name, desc in spell.items()]
        lines.append(HtmlTag('li', HtmlTag('table', rows, 'class=psy')))
        return lines
    def get(self, faction):
        """Render the whole faction and return the complete HTML document."""
        body = [HtmlTag('h1', 'Grimdark Future ' + faction.title)]
        for units, upgrades, specialRules, psychics in faction.pages:
            body.append(self.addUnits(units))
            ul = self.addUpgrades(upgrades) + self.addSpecialRules(specialRules) + self.addPsychics(psychics)
            body.append(HtmlTag('ul', ul))
        return self.header + str(HtmlTag('body', body)) + self.footer
def gen2(extension):
    """Return a fresh dumper for `extension` ('txt', 'tex' or 'html'); None if unknown."""
    if extension == 'txt':
        return DumpTxt()
    elif extension == 'tex':
        return DumpTex()
    elif extension == 'html':
        return DumpHtml()
    return None
def write_file(faction, build_dir, ext):
    """Render `faction` with the dumper for `ext` and write <build_dir>/<ext>/<name>.<ext>."""
    data = gen2(ext).get(faction)
    path = os.path.join(build_dir, ext)
    # Make sure the per-format output directory exists.
    pathlib.Path(path).mkdir(parents=True, exist_ok=True)
    fname = os.path.join(path, faction.name + '.' + ext)
    with open(fname, "w") as f:
        print(' Writing {}'.format(fname))
        f.write(data)
def generateFaction(factionName, build_dir='.', outputs=('html',)):
    """Build one faction and write it in each requested output format.

    Args:
        factionName: path to the faction directory (a trailing '/' is tolerated).
        build_dir: root directory for the generated files.
        outputs: iterable of output extensions ('txt', 'html', 'tex').
            Default is a tuple rather than a list to avoid the shared
            mutable-default-argument pitfall; behavior is unchanged since
            the value is only iterated.
    """
    factionName = factionName.strip('/')
    print("Building faction " + factionName)
    faction = Faction(factionName)
    for ext in outputs:
        write_file(faction, build_dir, ext)
def main():
    """CLI entry point: build every faction path given on the command line."""
    parser = argparse.ArgumentParser(description='This script will compute the Unit costs and upgrade costs for a faction, and write the .tex files for LaTeX')
    parser.add_argument('-b', '--build-dir', type=str, default='build',
                        help='directory to write the output files')
    parser.add_argument('path', type=str, nargs='+',
                        help='path to the faction (should contain at list equipments.yml, units.yml, upgrades.yml)')
    args = parser.parse_args()
    for factionName in args.path:
        # All three formats are always generated here, overriding the
        # default `outputs` of generateFaction.
        generateFaction(factionName, args.build_dir, ['txt', 'html', 'tex'])
if __name__ == "__main__":
# execute only if run as a script
main()
| kdj0c/onepagepoints | onepagebatch.py | Python | mit | 17,621 | 0.002213 |
from lfmconf.lfmconf import get_lastfm_conf
query_play_count_by_month = """
select * from view_play_count_by_month v
where substr(v.yr_month, 1, 4) =
"""
query_top_with_remaining = """
with top as (
{query_top}
),
total_count as (
{query_play_count}
)
select t.*
from top t
"""
query_top_artists_with_remaining = query_top_with_remaining + \
"""
union all
select 'Remaining artists' as artist_name,
((select tc.play_count from total_count tc)
-
(select sum(play_count) from top)) as play_count
"""
query_top_albums_with_remaining = query_top_with_remaining + \
"""
union all
select 'Remaining albums' as album_name,
'...' as artist_name,
((select tc.play_count from total_count tc)
-
(select sum(play_count) from top)) as play_count
"""
query_top_tracks_with_remaining = query_top_with_remaining + \
"""
union all
select 'Remaining tracks' as track_name,
'...' as artist_name,
'...' as album_name,
((select tc.play_count from total_count tc)
-
(select sum(play_count) from top)) as play_count
"""
query_top_artists = """
select p.artist_name,
count(p.id) as play_count
from play p
where p.artist_name not like 'VA %'
{condition}
group by p.artist_name
order by count(p.id) desc
"""
query_top_albums = """
select p.album_name,
p.artist_name,
count(p.id) as play_count
from play p
where 1 = 1
{condition}
group by p.album_name, p.artist_name
order by count(p.id) desc
"""
query_top_tracks = """
select p.track_name,
p.artist_name,
p.album_name,
count(p.id) as play_count
from play p
where 1 = 1
{condition}
group by p.track_name, p.artist_name, p.album_name
order by count(p.id) desc
"""
query_play_count = """
select count(p.id) as play_count
from play p
where 1 = 1
{condition}
"""
conf = get_lastfm_conf()
dbms = conf['lastfm']['db']['dbms']
def build_query_play_count_by_month():
    """Append the dbms-specific bind placeholder (the year) to the monthly play-count query."""
    placeholders = {'mysql': '%s', 'sqlite': '?'}
    marker = placeholders.get(dbms)
    # Implicitly returns None for an unsupported dbms, as before.
    if marker is not None:
        return query_play_count_by_month + marker
def build_query_play_count_for_duration(duration):
    """Query: total play count over the last `duration` days."""
    condition = build_duration_condition(duration)
    return query_play_count.format(condition=condition)
def build_query_top_artists_for_duration_with_remaining(duration):
    """Query: top artists over a duration, plus a 'Remaining artists' total row."""
    query_top = build_query_top_artists_for_duration(duration)
    query_count = build_query_play_count_for_duration(duration)
    return query_top_artists_with_remaining.format(query_top=query_top,
                                                   query_play_count=query_count)
def build_query_top_artists_for_duration(duration):
    """Query: top artists over a duration, limited by a bound parameter."""
    condition = build_duration_condition(duration)
    return query_top_artists.format(condition=condition) + add_limit()
def add_limit():
    """Return a LIMIT clause using the dbms-specific bind placeholder."""
    clause = 'limit '
    if dbms == 'mysql':
        clause += '%s'
    elif dbms == 'sqlite':
        clause += '?'
    return clause
def build_query_top_albums_for_duration_with_remaining(duration):
    """Query: top albums over a duration, plus a 'Remaining albums' total row."""
    query_top = build_query_top_albums_for_duration(duration)
    query_count = build_query_play_count_for_duration(duration)
    return query_top_albums_with_remaining.format(query_top=query_top,
                                                  query_play_count=query_count)
def build_query_top_albums_for_duration(duration):
    """Query: top albums over a duration, limited by a bound parameter."""
    condition = build_duration_condition(duration)
    return query_top_albums.format(condition=condition) + add_limit()
def build_query_top_tracks_for_duration_with_remaining(duration):
    """Query: top tracks over a duration, plus a 'Remaining tracks' total row."""
    query_top = build_query_top_tracks_for_duration(duration)
    query_count = build_query_play_count_for_duration(duration)
    return query_top_tracks_with_remaining.format(query_top=query_top,
                                                  query_play_count=query_count)
def build_query_top_tracks_for_duration(duration):
    """Query: top tracks over a duration, limited by a bound parameter."""
    condition = build_duration_condition(duration)
    return query_top_tracks.format(condition=condition) + add_limit()
def build_query_play_count_for_year():
    """Query: total play count for a bound year parameter."""
    condition = build_year_condition()
    return query_play_count.format(condition=condition)
def build_query_top_artists_for_year_with_remaining():
    """Query: top artists of a year, plus a 'Remaining artists' total row."""
    query_top = build_query_top_artists_for_year()
    query_count = build_query_play_count_for_year()
    return query_top_artists_with_remaining.format(query_top=query_top,
                                                   query_play_count=query_count)
def build_query_top_artists_for_year():
    """Query: top artists of a bound year, limited by a bound parameter."""
    condition = build_year_condition()
    return query_top_artists.format(condition=condition) + add_limit()
def build_query_top_albums_for_year_with_remaining():
    """Query: top albums of a year, plus a 'Remaining albums' total row."""
    query_top = build_query_top_albums_for_year()
    query_count = build_query_play_count_for_year()
    return query_top_albums_with_remaining.format(query_top=query_top,
                                                  query_play_count=query_count)
def build_query_top_albums_for_year():
    """Query: top albums of a bound year, limited by a bound parameter."""
    condition = build_year_condition()
    return query_top_albums.format(condition=condition) + add_limit()
def build_query_top_tracks_for_year_with_remaining():
    """Query: top tracks of a year, plus a 'Remaining tracks' total row."""
    query_top = build_query_top_tracks_for_year()
    query_count = build_query_play_count_for_year()
    return query_top_tracks_with_remaining.format(query_top=query_top,
                                                  query_play_count=query_count)
def build_query_top_tracks_for_year():
    """Query: top tracks of a bound year, limited by a bound parameter."""
    condition = build_year_condition()
    return query_top_tracks.format(condition=condition) + add_limit()
def build_duration_condition(duration):
    """Return an SQL filter restricting plays to the last `duration` days.

    `duration` must be a string of digits; anything else (or an unsupported
    dbms) yields an empty condition, i.e. no filtering.
    """
    if not duration.isdigit():
        return ''
    if dbms == 'mysql':
        return 'and p.play_date > now() + interval - %s day'
    if dbms == 'sqlite':
        return "and date(p.play_date) > date('now', '-' || ? || ' day')"
    return ''
def build_year_condition():
    """Return an SQL filter matching plays from a bound year parameter.

    An unsupported dbms yields an empty condition (no filtering).
    """
    if dbms == 'mysql':
        return 'and year(p.play_date) = %s'
    if dbms == 'sqlite':
        return "and strftime('%Y', p.play_date) = ?"
    return ''
| nicolasm/lastfm-export | queries/tops.py | Python | mit | 6,184 | 0.000809 |
from utils.strings import quote
from plugins.languages import javascript
from utils.loggers import log
from utils import rand
import base64
import re
class Dot(javascript.Javascript):
    """Payload plugin for the doT Node.js template engine.

    Each action is a doT template ({{= }} interpolation / {{ }} statement)
    whose body reaches Node's runtime via global.process.mainModule.require,
    providing file read/write, hashing, command execution and OS detection.
    The %(...)s placeholders are filled in by the framework before injection.
    """
    def init(self):
        self.update_actions({
            'render' : {
                'render': '{{=%(code)s}}',
                'header': '{{=%(header)s}}',
                'trailer': '{{=%(trailer)s}}'
            },
            'write' : {
                'call' : 'inject',
                # Chunks arrive base64-encoded and are appended as raw bytes.
                'write' : """{{=global.process.mainModule.require('fs').appendFileSync('%(path)s', Buffer('%(chunk_b64)s', 'base64'), 'binary')}}""",
                'truncate' : """{{=global.process.mainModule.require('fs').writeFileSync('%(path)s', '')}}"""
            },
            'read' : {
                'call': 'evaluate',
                # File content is returned base64-encoded.
                'read' : """global.process.mainModule.require('fs').readFileSync('%(path)s').toString('base64');"""
            },
            'md5' : {
                'call': 'evaluate',
                'md5': """global.process.mainModule.require('crypto').createHash('md5').update(global.process.mainModule.require('fs').readFileSync('%(path)s')).digest("hex");"""
            },
            'evaluate' : {
                'test_os': """global.process.mainModule.require('os').platform()""",
            },
            'execute' : {
                'call': 'evaluate',
                'execute': """global.process.mainModule.require('child_process').execSync(Buffer('%(code_b64)s', 'base64').toString());"""
            },
            'execute_blind' : {
                # The bogus prefix is to avoid false detection of Javascript instead of doT
                'call': 'inject',
                # Time-based blind detection: the sleep delays the response.
                'execute_blind': """{{=''}}{{global.process.mainModule.require('child_process').execSync(Buffer('%(code_b64)s', 'base64').toString() + ' && sleep %(delay)i');}}"""
            },
        })
        self.set_contexts([
            # Text context, no closures
            { 'level': 0 },
            { 'level': 1, 'prefix': '%(closure)s;}}', 'suffix' : '{{1;', 'closures' : javascript.ctx_closures },
        ])
| epinna/tplmap | plugins/engines/dot.py | Python | gpl-3.0 | 2,139 | 0.013558 |
'''Dirty talk like you're in Dundalk'''
import random
import re
import string
__author__ = ('iandioch')
COMMAND = 'flirt'
PHRASES = [
"rawr~, {s}{sep}",
"{s}, big boy{sep}",
"{s} xo",
"{s} bb{sep}",
"babe, {s}{sep}",
"hey xxx {s}{sep}",
"{s} xxx",
"{s} xx",
"{s} xo",
"{s} xoxo",
"hot stuff, {s}{sep}",
"{s} bbz{sep}",
"{s} 8==)",
"i'm horny. {s}{sep}",
"do you want to come over tonight..? {s}{sep}",
"my parents aren't home, {s}{sep}",
"{s} ;)",
"{s} 🍆",
"{s} 🍆🍆",
"{s} 🍑",
"{s} 🍌",
"{s} 💦💦💦",
"{s} 👅",
"{s} 😘😘",
"{s}, cutie{sep}",
"{s}, you absolute babe",
"{s} later???",
]
def flirt(message):
    """Rewrite `message` sentence by sentence with a random flirty phrase.

    Leading whitespace of each sentence is preserved; the sentence body is
    lower-cased and substituted into a random template from PHRASES, keeping
    the original sentence separator.  Recurses on the remainder.
    """
    if len(message) <= 1:
        return ''
    # Split on the FIRST sentence separator of any kind.  The original
    # `for sep in '.!?':` loop returned on its very first iteration, so
    # '!' and '?' were never honoured.
    match = re.search(r'[.!?]', message)
    if match:
        s = message[:match.start()]
        sepfound = match.group(0)
        after = message[match.end():]
    else:
        s, sepfound, after = message, '', ''
    numspace = len(s) - len(s.lstrip())
    flirted = ' ' * numspace + \
        random.choice(PHRASES).format(s=s.lstrip().lower(), sep=sepfound)
    return flirted + flirt(after)
def main(bot, author_id, message, thread_id, thread_type, **kwargs):
    """Bot entry point: flirt-ify the previous thread message and send it back."""
    # limit=2 and index [1]: picks the message before the triggering command
    # (presumably fetchThreadMessages returns newest-first — TODO confirm).
    message = bot.fetchThreadMessages(thread_id=thread_id, limit=2)[1]
    sauce = flirt(message.text)
    bot.sendMessage(sauce, thread_id=thread_id, thread_type=thread_type)
if __name__ == '__main__':
print(flirt('hey brandon do you have a minute'))
print(flirt('I need to talk to you about our lord and saviour steely for a minute. Please brandon.'))
print(flirt('Fine then'))
print(flirt('Your API was shit anyway'))
| sentriz/steely | steely/plugins/flirty.py | Python | gpl-3.0 | 1,575 | 0.001297 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Registry for Jasmine spec files."""
import os
import re
from flask_registry import RegistryProxy
from werkzeug.utils import import_string
from invenio_ext.registry import DictModuleAutoDiscoverySubRegistry
class JasmineSpecsAutoDiscoveryRegistry(DictModuleAutoDiscoverySubRegistry):
    """Registry for Jasmine spec files.

    Walks each module's ``testsuite/js/`` folder and registers every
    ``*.js`` and ``*.html`` file found, keyed by ``<pkg>/<relative path>``.
    """

    # Raw string (was a plain string with '\.') avoids Python's
    # invalid-escape-sequence warning; the pattern itself is unchanged.
    pattern = re.compile(r"(?:.+\.js$)|(?:.+\.html$)")
    specs_folder = 'js'

    def __init__(self, *args, **kwargs):
        """Initialize registry."""
        super(JasmineSpecsAutoDiscoveryRegistry, self).__init__(
            'testsuite', **kwargs
        )

    def keygetter(self, key, original_value, new_value):
        """No key mapping."""
        return key

    def _walk_dir(self, pkg, base, root):
        """Recursively register *.js/*.html files below ``root``."""
        for root, dirs, files in os.walk(root):
            for name in files:
                if JasmineSpecsAutoDiscoveryRegistry.pattern.match(name):
                    filename = os.path.join(root, name)
                    # Key is the file path relative to `base`, prefixed
                    # with the package name.
                    filepath = "{0}/{1}".format(
                        pkg,
                        filename[len(base) + 1:]
                    )
                    self.register(filename, key=filepath)

    def _discover_module(self, pkg):
        """Load list of files from resource directory."""
        import_str = pkg + '.' + self.module_name
        try:
            module = import_string(import_str, silent=self.silent)
            if module is not None:
                for p in module.__path__:
                    specsfolder = os.path.join(p, self.specs_folder)
                    if os.path.isdir(specsfolder):
                        self._walk_dir(pkg, specsfolder, specsfolder)
        except ImportError as e:  # pylint: disable=C0103
            self._handle_importerror(e, pkg, import_str)
        except SyntaxError as e:
            self._handle_syntaxerror(e, pkg, import_str)
specs = RegistryProxy("jasmine.specs", JasmineSpecsAutoDiscoveryRegistry)
| tiborsimko/invenio-ext | invenio_ext/jasmine/registry.py | Python | gpl-2.0 | 2,838 | 0.000705 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.testing.config import ctx_list
from tvm.relay.prelude import Prelude
import pytest
def check_result(args, expected_result, mod=None):
    """
    Check that evaluating the module's "main" applied to the arguments
    produces `expected_result` on the Relay VM, for every enabled target.

    Parameters
    ----------
    args: list of Expr
        The arguments to supply the expr.
    expected_result:
        The expected result of running the expression.
    mod: relay.Module, optional
        The module whose "main" function is evaluated.
    """
    for target, ctx in ctx_list():
        vm = relay.create_executor('vm', ctx=ctx, target=target, mod=mod)
        rts_result = vm.evaluate()(*args)
        tvm.testing.assert_allclose(expected_result, rts_result.asnumpy())
def veval(f, *args, ctx=tvm.cpu(), target="llvm"):
    """Compile `f` (an Expr or a Module) for the VM and invoke its "main".

    NOTE(review): the `ctx` parameter is currently unused — the VM is always
    initialized on tvm.cpu(), matching the original behavior.
    """
    if isinstance(f, relay.Expr):
        mod = relay.Module()
        mod["main"] = f
    else:
        assert isinstance(f, relay.Module), "expected expression or module"
        mod = f
    compiler = relay.vm.VMCompiler()
    vm = compiler.compile(mod, target)
    vm.init(tvm.cpu())
    return vm.invoke("main", *args)
def vmobj_to_list(o):
    """Flatten a VM result object into a flat Python list of tensor values."""
    if isinstance(o, tvm.relay.backend.vmobj.TensorObject):
        return [o.asnumpy().tolist()]
    if isinstance(o, tvm.relay.backend.vmobj.DatatypeObject):
        flattened = []
        for field in o:
            flattened += vmobj_to_list(field)
        return flattened
    raise RuntimeError("Unknown object type: %s" % type(o))
def test_split():
x = relay.var('x', shape=(12,))
y = relay.split(x, 3, axis=0).astuple()
f = relay.Function([x], y)
x_data = np.random.rand(12,).astype('float32')
res = veval(f, x_data)
ref_res = np.split(x_data, 3, axis=0)
for i in range(3):
tvm.testing.assert_allclose(res[i].asnumpy(), ref_res[i])
def test_split_no_fuse():
x = relay.var('x', shape=(12,))
y = relay.split(x, 3, axis=0).astuple()
z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0)
z = relay.annotation.stop_fusion(z)
f = relay.Function([x], z)
x_data = np.random.rand(12,).astype('float32')
res = veval(f, x_data)
tvm.testing.assert_allclose(res.asnumpy(), np.split(x_data, 3, axis=0)[0])
def test_id():
x = relay.var('x', shape=(10, 10), dtype='float64')
f = relay.Function([x], x)
x_data = np.random.rand(10, 10).astype('float64')
mod = relay.Module()
mod["main"] = f
check_result([x_data], x_data, mod=mod)
def test_op():
x = relay.var('x', shape=(10, 10))
f = relay.Function([x], x + x)
x_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([x_data], 2 * x_data, mod=mod)
def any(x):
    # NOTE(review): this helper shadows the builtin `any` for the rest of
    # the module; renaming it would be safer but requires updating callers
    # (test_cond, test_simple_if).
    # Flattens x to 2-D, then min-reduces over both axes — collapses an
    # elementwise comparison into a scalar condition.
    x = relay.op.nn.batch_flatten(x)
    return relay.op.min(x, axis=[0, 1])
def test_cond():
x = relay.var('x', shape=(10, 10))
y = relay.var('y', shape=(10, 10))
# f = relay.Function([x, y], relay.op.equal(x, y))
f = relay.Function([x, y], any(relay.op.equal(x, y)))
x_data = np.random.rand(10, 10).astype('float32')
y_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
# same
check_result([x_data, x_data], True, mod=mod)
# diff
check_result([x_data, y_data], False, mod=mod)
def test_simple_if():
x = relay.var('x', shape=(10, 10))
y = relay.var('y', shape=(10, 10))
f = relay.Function([x, y],
relay.If(any(relay.op.equal(x, y)), x, y))
x_data = np.random.rand(10, 10).astype('float32')
y_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
# same
check_result([x_data, x_data], x_data, mod=mod)
# diff
check_result([x_data, y_data], y_data, mod=mod)
def test_simple_call():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
sb = ScopeBuilder()
sb.ret(i)
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
mod[sum_up] = func
i_data = np.array(0, dtype='int32')
iarg = relay.var('iarg', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg], sum_up(iarg))
check_result([i_data], i_data, mod=mod)
def test_count_loop():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype='int32'))
rec_call = relay.Call(sum_up, [one_less])
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
mod[sum_up] = func
i_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg], sum_up(iarg))
result = veval(mod, i_data)
tvm.testing.assert_allclose(result.asnumpy(), i_data)
check_result([i_data], i_data, mod=mod)
def test_sum_loop():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
accum = relay.var('accum', shape=[], dtype='int32')
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, 'int32'))):
sb.ret(accum)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, 'int32'))
new_accum = relay.add(accum, i)
sb.ret(relay.Call(sum_up, [one_less, new_accum]))
func = relay.Function([i, accum], sb.get())
mod[sum_up] = func
loop_bound = 0
i_data = np.array(loop_bound, dtype='int32')
accum_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
aarg = relay.var('accum', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
check_result([i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod)
def test_tuple_fst():
    """Projecting the first field of a 2-tuple returns it unchanged."""
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var('tup', type_annotation=ttype)
    f = relay.Function([tup], relay.TupleGetItem(tup, 0))
    # NOTE(review): declared shape is (1,) but the data has 41 elements —
    # presumably a typo for np.random.rand(1); confirm whether relay
    # enforces the annotated shape at runtime.
    i_data = np.random.rand(41).astype('float32')
    j_data = np.random.rand(10).astype('float32')
    mod = relay.Module()
    mod["main"] = f
    check_result([(i_data, j_data)], i_data, mod=mod)
def test_tuple_second():
    """Projecting the second field of a 2-tuple returns it unchanged."""
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var('tup', type_annotation=ttype)
    f = relay.Function([tup], relay.TupleGetItem(tup, 1))
    # NOTE(review): first field's data has 41 elements vs declared shape
    # (1,) — presumably a typo for np.random.rand(1); see test_tuple_fst.
    i_data = np.random.rand(41).astype('float32')
    j_data = np.random.rand(10).astype('float32')
    mod = relay.Module()
    mod["main"] = f
    check_result([(i_data, j_data)], j_data, mod=mod)
def test_list_constructor():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
l = p.l
one2 = cons(relay.const(1), nil())
one3 = cons(relay.const(2), one2)
one4 = cons(relay.const(3), one3)
f = relay.Function([], one4)
mod["main"] = f
result = veval(mod)
assert len(result) == 2
assert len(result[1]) == 2
obj = vmobj_to_list(result)
tvm.testing.assert_allclose(obj, np.array([3,2,1]))
def test_let_tensor():
sb = relay.ScopeBuilder()
shape = (1,)
x = relay.var('x', shape=shape, dtype='float32')
x1 = relay.var('x1', shape=shape, dtype='float32')
x1 = sb.let(x1, x)
xplusone = x1 + relay.const(42.0, 'float32')
sb.ret(xplusone)
body = sb.get()
f = relay.Function([x], body)
x_data = np.random.rand(*shape).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([x_data], x_data + 42.0, mod=mod)
def test_let_scalar():
sb = relay.ScopeBuilder()
x = relay.var('x', 'float32')
x1 = sb.let('x1', x)
xplusone = x1 + relay.const(42.0, 'float32')
sb.ret(xplusone)
body = sb.get()
f = relay.Function([x], body)
x_data = np.array(np.random.rand()).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([x_data], x_data + 42.0, mod=mod)
def test_compose():
mod = relay.Module()
p = Prelude(mod)
compose = p.compose
# add_one = fun x -> x + 1
sb = relay.ScopeBuilder()
x = relay.var('x', 'float32')
x1 = sb.let('x1', x)
xplusone = x1 + relay.const(1.0, 'float32')
sb.ret(xplusone)
body = sb.get()
add_one = relay.GlobalVar("add_one")
add_one_func = relay.Function([x], body)
# add_two = compose(add_one, add_one)
sb = relay.ScopeBuilder()
y = relay.var('y', 'float32')
add_two_func = sb.let('add_two', compose(add_one_func, add_one_func))
add_two_res = add_two_func(y)
sb.ret(add_two_res)
add_two_body = sb.get()
mod[add_one] = add_one_func
f = relay.Function([y], add_two_body)
mod["main"] = f
x_data = np.array(np.random.rand()).astype('float32')
result = veval(mod, [x_data])
tvm.testing.assert_allclose(result.asnumpy(), x_data + 2.0)
def test_list_hd():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
l = p.l
hd = p.hd
one2 = cons(relay.const(1), nil())
one3 = cons(relay.const(2), one2)
one4 = cons(relay.const(3), one3)
three = hd(one4)
f = relay.Function([], three)
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(result.asnumpy(), 3)
@pytest.mark.xfail
def test_list_tl_empty_list():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
l = p.l
tl = p.tl
f = relay.Function([], tl(nil()))
mod["main"] = f
result = veval(mod)
print(result)
def test_list_tl():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
l = p.l
tl = p.tl
one2 = cons(relay.const(1), nil())
one3 = cons(relay.const(2), one2)
one4 = cons(relay.const(3), one3)
f = relay.Function([], tl(one4))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array([2,1]))
def test_list_nth():
    """nth(l, i) returns the i-th element for every index of a 10-element list."""
    expected = list(range(10))
    for i in range(len(expected)):
        mod = relay.Module()
        p = Prelude(mod)
        nil = p.nil
        cons = p.cons
        nth = p.nth
        l = nil()
        # BUG FIX: the inner loop previously reused `i`, clobbering the outer
        # index (it ended at expected[0] == 0), so only nth(l, 0) was ever
        # tested.  Use a distinct variable for the list-building loop.
        for v in reversed(expected):
            l = cons(relay.const(v), l)
        f = relay.Function([], nth(l, relay.const(i)))
        mod["main"] = f
        result = veval(mod)
        tvm.testing.assert_allclose(result.asnumpy(), expected[i])
def test_list_update():
expected = list(range(10))
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
update = p.update
l = nil()
# create zero initialized list
for i in range(len(expected)):
l = cons(relay.const(0), l)
# set value
for i, v in enumerate(expected):
l = update(l, relay.const(i), relay.const(v))
f = relay.Function([], l)
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))
def test_list_length():
expected = list(range(10))
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
length = p.length
l = nil()
# create zero initialized list
for i in range(len(expected)):
l = cons(relay.const(0), l)
l = length(l)
f = relay.Function([], l)
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(result.asnumpy(), 10)
def test_list_map():
mod = relay.Module()
p = Prelude(mod)
x = relay.var('x', 'int32')
add_one_func = relay.Function([x], relay.const(1) + x)
nil = p.nil
cons = p.cons
map = p.map
l = cons(relay.const(2), cons(relay.const(1), nil()))
f = relay.Function([], map(add_one_func, l))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2]))
def test_list_foldl():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
foldl = p.foldl
x = relay.var("x")
y = relay.var("y")
rev_dup_func = relay.Function([y, x], cons(x, cons(x, y)))
l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))
f = relay.Function([], foldl(rev_dup_func, nil(), l))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 3, 2, 2, 1, 1]))
def test_list_foldr():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
foldr = p.foldr
x = relay.var("x")
y = relay.var("y")
identity_func = relay.Function([x, y], cons(x, y))
l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))
f = relay.Function([], foldr(identity_func, nil(), l))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array([1, 2, 3]))
def test_list_sum():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
sum = p.sum
l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))
f = relay.Function([], sum(l))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(result.asnumpy(), 6)
def test_list_filter():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
filter = p.filter
x = relay.var("x", 'int32')
greater_than_one = relay.Function([x], x > relay.const(1))
l = cons(relay.const(1),
cons(relay.const(3),
cons(relay.const(1),
cons(relay.const(5),
cons(relay.const(1), nil())))))
f = relay.Function([], filter(greater_than_one, l))
mod["main"] = f
result = veval(mod)
tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5]))
def test_closure():
x = relay.var('x', shape=())
y = relay.var('y', shape=())
f = relay.Function([x], x + y)
ff = relay.Function([y], f)
clo = ff(relay.const(1.0))
main = clo(relay.const(2.0))
res = veval(main)
tvm.testing.assert_allclose(res.asnumpy(), 3.0)
def test_add_op_scalar():
    """
    test_add_op_scalar:
        fn (x, y) { return x + y; }
    applied to two float32 scalars.
    """
    mod = relay.Module()
    x = relay.var('x', shape=())
    y = relay.var('y', shape=())
    mod["main"] = relay.Function([x, y], relay.op.add(x, y))
    lhs = np.array(10.0, dtype='float32')
    rhs = np.array(1.0, dtype='float32')
    check_result([lhs, rhs], lhs + rhs, mod=mod)
def test_add_op_tensor():
    """
    test_add_op_tensor:
        fn (x, y) { return x + y; }
    elementwise add of two (10, 5) float32 tensors.
    """
    mod = relay.Module()
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(10, 5))
    mod["main"] = relay.Function([x, y], relay.op.add(x, y))
    lhs = np.random.rand(10, 5).astype('float32')
    rhs = np.random.rand(10, 5).astype('float32')
    check_result([lhs, rhs], lhs + rhs, mod=mod)
def test_add_op_broadcast():
    """
    test_add_op_broadcast:
        fn (x, y) { return x + y; }
    where a (1, 5) tensor broadcasts against a (10, 5) tensor.
    """
    mod = relay.Module()
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(1, 5))
    mod["main"] = relay.Function([x, y], relay.op.add(x, y))
    lhs = np.random.rand(10, 5).astype('float32')
    rhs = np.random.rand(1, 5).astype('float32')
    check_result([lhs, rhs], lhs + rhs, mod=mod)
if __name__ == "__main__":
    # Run every VM test in sequence.
    # FIX: test_let_scalar() and test_let_tensor() were invoked twice
    # (once before test_split() and again after test_list_constructor());
    # the duplicate calls are removed.
    test_id()
    test_op()
    test_cond()
    test_simple_if()
    test_simple_call()
    test_count_loop()
    test_sum_loop()
    test_tuple_fst()
    test_tuple_second()
    test_let_scalar()
    test_let_tensor()
    test_split()
    test_split_no_fuse()
    test_list_constructor()
    test_compose()
    test_list_hd()
    test_list_tl_empty_list()
    test_list_tl()
    test_list_nth()
    test_list_update()
    test_list_length()
    test_list_map()
    test_list_foldl()
    test_list_foldr()
    test_list_sum()
    test_list_filter()
    test_closure()
    test_add_op_scalar()
    test_add_op_tensor()
    test_add_op_broadcast()
| Huyuwei/tvm | tests/python/relay/test_vm.py | Python | apache-2.0 | 17,272 | 0.003648 |
#
# Contain the transformation procedure
#
import sys
import module.loop.ast
#-----------------------------------------
def __makeForLoop(id, lbound, ubound, stride, loop_body):
    '''Generate a for loop:
         for (id=lbound; id<=ubound; id=id+stride)
           loop_body
       Any of lbound/ubound/stride may be None, producing an empty slot.'''
    ast = module.loop.ast
    init_exp = None
    if lbound:
        # id = lbound
        init_exp = ast.BinOpExp(id.replicate(), lbound.replicate(),
                                ast.BinOpExp.EQ_ASGN)
    test_exp = None
    if ubound:
        # id <= ubound
        test_exp = ast.BinOpExp(id.replicate(), ubound.replicate(),
                                ast.BinOpExp.LE)
    iter_exp = None
    if stride:
        # id = id + stride
        increment = ast.BinOpExp(id.replicate(), stride.replicate(),
                                 ast.BinOpExp.ADD)
        iter_exp = ast.BinOpExp(id.replicate(), increment,
                                ast.BinOpExp.EQ_ASGN)
    return ast.ForStmt(init_exp, test_exp, iter_exp, loop_body.replicate())
#-----------------------------------------
def transform(stmt, arg_info):
    '''Perform the loop-permutation transformation.

    stmt -- AST of the annotated code region (a loop nest).
    arg_info -- 1-tuple holding the desired loop order: a sequence of
        (index_name, is_optional) pairs.
    Returns the AST with the loops rebuilt in the requested order.
    Exits with an error message if the nest does not match the given order.
    '''
    # extract argument information
    loop_order, = arg_info
    # get rid of compound statement that contains only a single statement
    while isinstance(stmt, module.loop.ast.CompStmt) and len(stmt.stmts) == 1:
        stmt = stmt.stmts[0]
    # map each loop index name to [is_optional]; the loop's init/test/iter
    # expressions are appended to this list as the nest is traversed
    loop_info = {}
    for index_name, is_optional in loop_order:
        loop_info[index_name] = [is_optional]
    # create loop order (get rid of all optionality information)
    loop_order = [iname for iname, opt in loop_order]
    # extract loop control information and get the loop body
    loop_body = None
    cur_stmt = stmt
    unseen_loops = loop_order[:]
    seen_loops = []
    while True:
        if isinstance(cur_stmt, module.loop.ast.CompStmt) and len(cur_stmt.stmts) == 1:
            cur_stmt = cur_stmt.stmts[0]
            continue
        # FIX: all() replaces reduce(lambda x,y: x and y, ...) -- reduce is
        # not a builtin on Python 3 and was never imported here.
        all_unseen_optional = all(loop_info[i][0] for i in unseen_loops)
        if isinstance(cur_stmt, module.loop.ast.ForStmt) and not cur_stmt.init:
            print ('error:%s:Permut: a loop is assumed to have a non-empty init exp'
                   % (cur_stmt.line_no))
            sys.exit(1)
        if (isinstance(cur_stmt, module.loop.ast.ForStmt) and
            isinstance(cur_stmt.init, module.loop.ast.BinOpExp) and
            cur_stmt.init.op_type == module.loop.ast.BinOpExp.EQ_ASGN and
            isinstance(cur_stmt.init.lhs, module.loop.ast.IdentExp)):
            iname = cur_stmt.init.lhs.name
            if iname in seen_loops:
                if all_unseen_optional:
                    loop_body = cur_stmt
                    break
                else:
                    print ('error:%s: loop "%s" cannot occur repeatedly'
                           % (cur_stmt.line_no, iname))
                    sys.exit(1)
            if iname not in unseen_loops:
                if all_unseen_optional:
                    loop_body = cur_stmt
                    break
                else:
                    print ('error:%s: loop "%s" is not specified in the loop order %s'
                           % (cur_stmt.line_no, iname, tuple(loop_order)))
                    sys.exit(1)
            # record this loop's control expressions for the rebuild phase
            linfo = loop_info[iname]
            linfo.append(cur_stmt.init)
            linfo.append(cur_stmt.test)
            linfo.append(cur_stmt.iter)
            unseen_loops.remove(iname)
            seen_loops.append(iname)
            cur_stmt = cur_stmt.stmt
        else:
            if all_unseen_optional:
                loop_body = cur_stmt
                break
            else:
                # FIX: comprehension replaces filter(lambda ...) -- on
                # Python 3 filter() returns a lazy iterator, so the
                # materialization is now explicit and version-independent.
                unfound_loops = tuple(x for x in unseen_loops
                                      if not loop_info[x][0])
                print ('error:%s: to-be-permuted loops %s do not exist'
                       % (stmt.line_no, unfound_loops))
                sys.exit(1)
    # generate the permuted loop; wrap the body innermost-first, skipping
    # optional loops that were not found (their linfo holds only the flag)
    transformed_stmt = loop_body
    for iname in reversed(loop_order):
        linfo = loop_info[iname]
        if len(linfo) > 1:
            opt, init_exp, test_exp, iter_exp = linfo
            transformed_stmt = module.loop.ast.ForStmt(init_exp.replicate(),
                                                       test_exp.replicate(),
                                                       iter_exp.replicate(),
                                                       transformed_stmt)
    return transformed_stmt
| tajkhan/pluto-pocc | annotations/module/loop/submodule/permut/transformator.py | Python | gpl-3.0 | 4,926 | 0.003857 |
import unittest
from django.test.client import Client
from django.forms import ValidationError
from fields import MultipleEmailField
class CaseTests(unittest.TestCase):
def setUp(self):
self.c = Client()
self.case_id = 12345
self.status_codes = [301, 302]
def test_cases(self):
response = self.c.get('/cases/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_new(self):
response = self.c.get('/case/new/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_clone(self):
response = self.c.get('/cases/clone/', {'case': 12197})
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_cases_changestatus(self):
response = self.c.get('/cases/changestatus/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_cases_priority(self):
response = self.c.get('/cases/priority/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_getcase(self):
location = '/case/%s' % self.case_id
response = self.c.get(location)
if response.status_code == 301:
print response.path
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_case_details(self):
location = '/case/%s/details' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
# self.assertEquals(response.status_code, 302)
def test_case_edit(self):
location = '/case/%s/edit/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_history(self):
location = '/case/%s/history/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_changecaseorder(self):
location = '/case/%s/changecaseorder/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_attachment(self):
location = '/case/%s/attachment/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_log(self):
location = '/case/%s/log/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_bug(self):
location = '/case/%s/bug/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_plan(self):
location = '/case/%s/plan/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
class Test_MultipleEmailField(unittest.TestCase):
    """Unit tests for MultipleEmailField parsing and validation."""

    def setUp(self):
        self.default_delimiter = ','
        self.field = MultipleEmailField(delimiter=self.default_delimiter)
        self.all_valid_emails = (
            'cqi@redhat.com', 'cqi@yahoo.com', 'chen@gmail.com', )
        self.include_invalid_emails = (
            '', ' cqi@redhat.com', 'chen@sina.com', )

    def test_to_python(self):
        # A single address becomes a one-element list.
        self.assertEqual(self.field.to_python('cqi@redhat.com'),
                         ['cqi@redhat.com'])
        # Empty fragments between delimiters are dropped.
        self.assertEqual(self.field.to_python('cqi@redhat.com,,cqi@gmail.com,'),
                         ['cqi@redhat.com', 'cqi@gmail.com'])
        # Empty-ish inputs all normalize to an empty list.
        for empty in ('', None, []):
            self.assertEqual(self.field.to_python(empty), [])

    def test_clean(self):
        self.assertEqual(self.field.clean('cqi@redhat.com'),
                         ['cqi@redhat.com'])
        self.assertEqual(self.field.clean('cqi@redhat.com,cqi@gmail.com'),
                         ['cqi@redhat.com', 'cqi@gmail.com'])
        # Surrounding whitespace and empty fragments are tolerated.
        self.assertEqual(self.field.clean(',cqi@redhat.com, ,cqi@gmail.com, \n'),
                         ['cqi@redhat.com', 'cqi@gmail.com'])
        # A malformed address anywhere in the list raises.
        self.assertRaises(ValidationError, self.field.clean,
                          ',cqi,cqi@redhat.com, \n,cqi@gmail.com, ')
        # Empty input: rejected when required, accepted otherwise.
        self.field.required = True
        self.assertRaises(ValidationError, self.field.clean, '')
        self.field.required = False
        self.assertEqual(self.field.clean(''), [])
# Allow running this test module directly: python tests.py
if __name__ == '__main__':
    unittest.main()
| ShaolongHu/Nitrate | tcms/testcases/tests.py | Python | gpl-2.0 | 5,924 | 0 |
"""
Shogun demo
Fernando J. Iglesias Garcia
This example shows the use of dimensionality reduction methods, mainly
Stochastic Proximity Embedding (SPE), although Isomap is also used for
comparison. The data selected to be embedded is an helix. Two different methods
of SPE (global and local) are applied showing that the global method outperforms
the local one in this case. Actually the results of local SPE are fairly poor
for this input. Finally, the reduction achieved with Isomap is better than the
two previous ones, more robust against noise. Isomap exploits the
parametrization of the input data.
"""
import math
import mpl_toolkits.mplot3d as mpl3
import numpy as np
import pylab
import util
from modshogun import RealFeatures
from modshogun import StochasticProximityEmbedding, SPE_GLOBAL
from modshogun import SPE_LOCAL, Isomap
# Number of data points
N = 500
# Generate helix: a closed 3D curve parametrized by t in (0, 2*pi]
t = np.linspace(1, N, N).T / N
t = t*2*math.pi
# Rows are the x, y, z coordinates of the N points.
X = np.r_[ [ ( 2 + np.cos(8*t) ) * np.cos(t) ],
           [ ( 2 + np.cos(8*t) ) * np.sin(t) ],
           [ np.sin(8*t) ] ]
# Bi-color helix: alternate 0/1 class labels along the parameter t
labels = np.round( (t*1.5) ) % 2
y1 = labels == 1
y2 = labels == 0
# Plot the original helix in 3D (red = class 1, green = class 0)
fig = pylab.figure()
fig.add_subplot(2, 2, 1, projection = '3d')
pylab.plot(X[0, y1], X[1, y1], X[2, y1], 'ro')
pylab.plot(X[0, y2], X[1, y2], X[2, y2], 'go')
pylab.title('Original 3D Helix')
# Create features instance wrapping the 3xN coordinate matrix
features = RealFeatures(X)
# Create Stochastic Proximity Embedding converter instance
converter = StochasticProximityEmbedding()
# Set target dimensionality of the embedding
converter.set_target_dim(2)
# Set strategy: global SPE considers all pairwise proximities
converter.set_strategy(SPE_GLOBAL)
# Compute SPE embedding
embedding = converter.embed(features)
X = embedding.get_feature_matrix()
fig.add_subplot(2, 2, 2)
pylab.plot(X[0, y1], X[1, y1], 'ro')
pylab.plot(X[0, y2], X[1, y2], 'go')
pylab.title('SPE with global strategy')
# Compute a second SPE embedding with the local (k-neighborhood) strategy
converter.set_strategy(SPE_LOCAL)
converter.set_k(12)
embedding = converter.embed(features)
X = embedding.get_feature_matrix()
fig.add_subplot(2, 2, 3)
pylab.plot(X[0, y1], X[1, y1], 'ro')
pylab.plot(X[0, y2], X[1, y2], 'go')
pylab.title('SPE with local strategy')
# Compute Isomap embedding (for comparison), also 2D with k=6 neighbors
converter = Isomap()
converter.set_target_dim(2)
converter.set_k(6)
embedding = converter.embed(features)
X = embedding.get_feature_matrix()
fig.add_subplot(2, 2, 4)
pylab.plot(X[0, y1], X[1, y1], 'ro')
pylab.plot(X[0, y2], X[1, y2], 'go')
pylab.title('Isomap')
# Install the quit-key handler from util and block until the window closes
pylab.connect('key_press_event', util.quit)
pylab.show()
| AzamYahya/shogun | examples/undocumented/python_modular/graphical/converter_spe_helix.py | Python | gpl-3.0 | 2,562 | 0.007026 |
#!/usr/bin/python
import computefarm as cf
from computefarm.farm import depth_first, breadth_first
import random
import logging
import numpy as np
# Seconds per hour; job-length parameters below are expressed in seconds.
HOUR = 60 * 60

# Per-queue job-stream parameters:
#   num     -- target number of idle jobs to keep queued (see add_jobs())
#   avg/std -- mean/stddev of the Gaussian job-length distribution
#   cpu/mem -- per-job resource request (cpus / memory); defaults presumably
#              supplied by Group.set_character() when omitted -- TODO confirm
default_queue_properties = {
    'grid': { 'num': 0, 'mem': 750, 'avg': HOUR, 'std': 0.6 * HOUR},
    'prod': { 'num': 0, 'avg': 8 * HOUR, 'std': 3 * HOUR},
    'short': { 'num': 500, 'avg': 1.2 * HOUR, 'std': 600},
    'long': { 'num': 500, 'avg': 5 * HOUR, 'std': 2 * HOUR},
    'test': { 'num': 0, 'avg': 8 * HOUR, 'cpu': 3},
    'mp8': { 'num': 0, 'avg': 6 * HOUR, 'std': 4 * HOUR, 'cpu': 8, 'mem': 6000}
}
def sort_like(array, like):
    """Yield the members of *array* ordered to match *like*.

    Items of *like* that are present in *array* are yielded first, in
    *like*'s order; the remaining members of *array* follow in sorted
    order (duplicates collapsed by the set difference, as before).
    """
    # PERF: membership tests against a list are O(n) per lookup; build the
    # set once so each `in` check is O(1).
    members = set(array)
    # All items in @like are picked in order if they exist in the array
    for x in like:
        if x in members:
            yield x
    # All the remaining are picked here
    for x in sorted(members - set(like)):
        yield x

log = logging.getLogger('sim')
class Simulation(object):
    """Drive a computefarm Farm: periodically submit jobs, negotiate job
    placement, and collect per-group usage statistics for plotting."""

    def __init__(self, nodes, negotiate_interval=150, stat_freq=10, submit_interval=200):
        """ Initialize the farm simulation, attach groups and queues to it and
            provide method of submitting jobs of a predetermined size into the
            queues.

            nodes -- total number of machines to generate for the farm
            negotiate_interval -- simulated seconds between negotiation cycles
            stat_freq -- simulated seconds between statistics samples
            submit_interval -- simulated seconds between job submissions
        """
        self.farm = cf.Farm()
        # Distribution of farm nodes, e.g. 331/90 is ratio of 24/32 core machines
        dist = (
            (24, 331),
            (32, 90),
            (8, 238),
        )
        self.farm.generate_from_dist(dist, size=nodes)
        root = self.setup_groups(cf.Group('<root>'))
        self.farm.attach_groups(root)
        # Keep a rolling history of stat_freq * 100 samples per group.
        self._init_stat(stat_freq * 100)
        # Default ranking: depth-first negotiation order
        self.farm.set_negotiatior_rank(depth_first)
        self.queue = cf.JobQueue()
        self.farm.attach_queue(self.queue)
        # How many seconds per negotiation/stat gathering cycle
        self.int_stat = stat_freq
        self.int_negotiate = negotiate_interval
        self.int_submit = submit_interval
        # Simulated timestamps at which each periodic task next fires.
        self.next_stat = 0
        self.next_negotiate = 0
        self.next_submit = 0
        # How many seconds to simulate each step
        self.sec_per_step = 5

    # these two _set* knobs are used in callbacks by the GUI
    def _set_neg_df(self):
        # Switch the negotiator to depth-first ranking.
        self.farm.set_negotiatior_rank(depth_first)

    def _set_neg_bf(self):
        # Switch the negotiator to breadth-first ranking.
        self.farm.set_negotiatior_rank(breadth_first)

    def _init_stat(self, hist_size):
        """ Statistics are kept in a constant-size numpy array (one per
            active group, keyed by group name) that is updated periodically.
        """
        self._stat = {}
        self._stat_size = hist_size
        for x in self.farm.groups.active_groups():
            self._stat[x.name] = np.zeros((hist_size), int)

    def _update_stat(self):
        # Record each active group's current usage in its rolling history.
        self.farm.update_usage()
        for g in self.farm.groups.active_groups():
            # Left-shift entire array back by one, so element n -> element n - 1
            self._stat[g.name] = np.roll(self._stat[g.name], -1)
            # New last element is current update
            self._stat[g.name][-1] = g.usage

    def setup_groups(self, root):
        """ Build the group tree under *root* and return it.  Reflects the
            current ATLAS group structure (numbers are group quotas/weights
            passed to add_child):

                       /- atlas +-- production +-- prod
                       |        |              |
                       |        |              \-- mp8
                       |        |              |
                       |        |              \-- test
                <root>-+        |
                       |        \-- analysis  +-- short
                       |                      |
                       |                      \-- long
                       \- grid
        """
        root.add_child('atlas')
        root.add_child('grid', 3)
        root['atlas'].add_child('production')
        root['atlas'].add_child('analysis')
        root['atlas']['production'].add_child('prod', 40)
        root['atlas']['production'].add_child('mp8', 5)
        root['atlas']['production'].add_child('test', 7)
        root['atlas']['analysis'].add_child('short', 10)
        root['atlas']['analysis'].add_child('long', 10)
        # Populate with default properties from top of this module
        for x in root.walk():
            if x.name in default_queue_properties:
                x.set_character(**default_queue_properties[x.name])
        return root

    def add_jobs(self):
        """ Submit more jobs into the queue, keeping the total idle jobs where
            they should be according to the sliders in the GUI (group.num).
        """
        for group in self.farm.groups.active_groups():
            # Top the idle-job count back up to the group's target.
            num_submit = group.num - self.farm.queue.get_group_idle(group.name)
            if num_submit <= 0:
                continue
            log.info("Submitting %d more %s jobs", num_submit, group.name)
            for n in xrange(num_submit):
                # Job length is random within a Gaussian distribution
                length = abs(random.gauss(group.avg, group.std))
                # Create job object and add it to queue
                job = cf.BatchJob(group=group.name, cpus=group.cpu, memory=group.mem,
                                  length=length)
                self.queue.add_job(job)

    def step(self, dt):
        """ Advance time of the simulation by dt steps (sec_per_step simulated
            seconds each), running submission/negotiation/statistics-gathering
            whenever its next deadline has passed.
        """
        for i in xrange(dt):
            self.farm.advance_time(self.sec_per_step)
            if self.farm.time > self.next_submit:
                self.add_jobs()
                self.next_submit = self.farm.time + self.int_submit
            if self.farm.time > self.next_negotiate:
                self.farm.negotiate_jobs()
                self.next_negotiate = self.farm.time + self.int_negotiate
            if self.farm.time > self.next_stat:
                self._update_stat()
                self.next_stat = self.farm.time + self.int_stat

    def display_order(self):
        # Fixed display order for the known queues; any other tracked group
        # names follow in sorted order (see sort_like above).
        sort_order = ('short', 'long', 'test', 'prod', 'mp8')
        return list(sort_like(self._stat.keys(), sort_order))

    def make_plotdata(self, groups='all'):
        """Return (x, y): sample indices and the stacked per-group usage
        histories, optionally restricted to the names in *groups*."""
        x = np.arange(self._stat_size)
        if groups == 'all':
            y = np.vstack((self._stat[x] for x in self.display_order()))
        else:
            y = np.vstack((self._stat[x] for x in self.display_order() if x in groups))
        return x, y
if __name__ == '__main__':
    # BUG FIX: Simulation.__init__ requires the `nodes` argument (farm size);
    # the original `Simulation()` call raised TypeError.  100 nodes is an
    # arbitrary but reasonable size for a standalone run.
    s = Simulation(nodes=100)
| fubarwrangler/atlassim | simulation.py | Python | gpl-2.0 | 6,264 | 0.003033 |
# Note: Modified by Neui (Note: sphinx.util.compat.Directive is deprecated)
#
# Copyright (C) 2011 by Matteo Franchin
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# <http://www.gnu.org/licenses/>.
from sphinx.builders.singlehtml import SingleFileHTMLBuilder
from docutils import nodes
from docutils.parsers.rst import Directive, directives
import re
class globalindex(nodes.General, nodes.Element):
    # Placeholder doctree node; filled with a rendered toctree (or removed)
    # by process_globalindex_nodes() at doctree-resolved time.
    pass

def visit_globalindex_node(self, node):
    # Emit the pre-rendered toctree HTML stored on the node.
    self.body.append(node['content'])

def depart_globalindex_node(self, node):
    # Nothing to close: visit_globalindex_node writes the complete markup.
    pass
class GlobalIndexDirective(Directive):
    """Directive marking where the whole-document index should appear.

    The options mirror the standard ``toctree`` directive: ``maxdepth``,
    ``collapse`` and ``titlesonly``.
    """
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {
        'maxdepth': directives.nonnegative_int,
        'collapse': directives.flag,
        'titlesonly': directives.flag,
    }

    def run(self):
        # Emit a placeholder node carrying the options; it is expanded (or
        # dropped) later, once the full doctree has been resolved.
        placeholder = globalindex('')
        placeholder['maxdepth'] = self.options.get('maxdepth', 2)
        placeholder['collapse'] = 'collapse' in self.options
        placeholder['titlesonly'] = 'titlesonly' in self.options
        return [placeholder]
def process_globalindex_nodes(app, doctree, fromdocname):
    """doctree-resolved handler: expand or drop globalindex placeholders.

    For every builder except singlehtml the placeholder nodes are simply
    removed; for singlehtml each one is replaced by the rendered toctree of
    the master document, with its hrefs rewritten to page-local anchors.
    """
    builder = app.builder
    if builder.name != SingleFileHTMLBuilder.name:
        for node in doctree.traverse(globalindex):
            node.parent.remove(node)
    else:
        docname = builder.config.master_doc
        for node in doctree.traverse(globalindex):
            # Forward the directive options to the toctree renderer.
            kwargs = dict(maxdepth=node['maxdepth'],
                          collapse=node['collapse'],
                          titles_only=node['titlesonly'])
            rendered_toctree = builder._get_local_toctree(docname, **kwargs)
            # For some reason, it refers to docname.html#anchor, where just
            # #anchor is enough.
            rendered_toctree = rendered_toctree.replace(docname + ".html", '')
            # Subsections will be #section#subsection, which is invalid.
            # Removing the first #section(s) keeps only the final anchor.
            rendered_toctree = re.sub('href="(?:#[^#"]+)*(#[^"]+)"', \
                'href="\\1"', rendered_toctree)
            node['content'] = rendered_toctree
def setup(app):
    """Sphinx extension entry point: register the node, the directive and
    the doctree-resolved hook that expands the placeholders."""
    app.add_node(globalindex,
                 html=(visit_globalindex_node, depart_globalindex_node))
    app.add_directive('globalindex', GlobalIndexDirective)
    app.connect('doctree-resolved', process_globalindex_nodes)
| vhelin/wla-dx | doc/sphinx/globalindex.py | Python | gpl-2.0 | 2,896 | 0.002762 |
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Resource(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared types of the model attributes, consumed by to_dict().
    swagger_types = {
        'id': 'int',
        'name': 'str'
    }

    # Maps python attribute names to their JSON keys in the API payload.
    attribute_map = {
        'id': 'Id',
        'name': 'Name'
    }

    def __init__(self, id=None, name=None):  # noqa: E501
        """Resource - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._name = None
        self.discriminator = None
        # Only assign through the property setters when a value was given,
        # so unset attributes stay None.
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name

    @property
    def id(self):
        """Gets the id of this Resource.  # noqa: E501

        The ID of the resource.  # noqa: E501

        :return: The id of this Resource.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this Resource.

        The ID of the resource.  # noqa: E501

        :param id: The id of this Resource.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def name(self):
        """Gets the name of this Resource.  # noqa: E501

        The name of the resource.  # noqa: E501

        :return: The name of this Resource.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this Resource.

        The name of the resource.  # noqa: E501

        :param name: The name of this Resource.  # noqa: E501
        :type: str
        """
        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: include dict entries if the model subclasses
        # dict (it does not here, so this branch is a no-op).
        if issubclass(Resource, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Resource):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| mindbody/API-Examples | SDKs/Python/swagger_client/models/resource.py | Python | bsd-2-clause | 3,703 | 0.00027 |
from django.utils import unittest
from django.contrib import admin
from hyperadmin.sites import ResourceSite
class SiteTestCase(unittest.TestCase):
    """Sanity check for building a ResourceSite from the Django admin."""

    def test_install_from_admin_site(self):
        resource_site = ResourceSite()
        # Import every app's admin module so admin.site is fully populated.
        admin.autodiscover()
        resource_site.install_models_from_site(admin.site)
        # At least one model should now be registered on the resource site.
        self.assertTrue(resource_site.registry)
| webcube/django-hyperadmin | hyperadmin/tests/test_sites.py | Python | bsd-3-clause | 360 | 0.008333 |
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ruamel.yaml
def none_representer(dumper, data):
    """Represent Python None as an explicit 'null' scalar when dumping."""
    return dumper.represent_scalar('tag:yaml.org,2002:null', 'null')
class YAML(object):
    def __init__(self):
        """Wrap construction of ruamel yaml object.

        Configures duplicate-key tolerance, explicit 'null' for None, and
        2/4/2 mapping/sequence/offset indentation.
        """
        self.yaml = ruamel.yaml.YAML()
        self.yaml.allow_duplicate_keys = True
        self.yaml.representer.add_representer(type(None), none_representer)
        self.yaml.indent(mapping=2, sequence=4, offset=2)

    def load(self, stream):
        """Parse YAML from *stream* (file-like or string)."""
        return self.yaml.load(stream)

    def tr(self, x):
        """Post-process dumped YAML text for list documents.

        Inserts a blank line before each top-level list item, keeps comment
        lines at their original column, and strips two leading characters
        from all other lines (NOTE(review): presumably undoing the sequence
        offset configured above -- confirm against the dump output).
        """
        x = x.replace('\n-', '\n\n-')
        newlines = []
        for line in x.split('\n'):
            if '#' in line:
                newlines.append(line)
            else:
                newlines.append(line[2:])
        return '\n'.join(newlines)

    def dump(self, data, *args, **kwargs):
        """Serialize *data*; list payloads get the tr() transform applied."""
        if isinstance(data, list):
            kwargs['transform'] = self.tr
        self.yaml.dump(data, *args, **kwargs)
# Shared module-level wrapper instance used by the convenience functions.
_yaml = YAML()

def load(*args, **kwargs):
    """Module-level convenience wrapper around YAML.load."""
    return _yaml.load(*args, **kwargs)

def dump(*args, **kwargs):
    """Module-level convenience wrapper around YAML.dump."""
    return _yaml.dump(*args, **kwargs)
| openstack-infra/project-config | tools/projectconfig_ruamellib.py | Python | apache-2.0 | 1,722 | 0 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for gclient.py.
See gclient_smoketest.py for integration tests.
"""
import Queue
import copy
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gclient
import gclient_utils
from testing_support import trial_dir
def write(filename, content):
    """Write *content* to *filename*, creating parent directories as needed."""
    path = os.path.abspath(filename)
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    with open(path, 'w') as fh:
        fh.write(content)
class SCMMock(object):
    """Stand-in for a gclient_scm checkout used by the unit tests.

    Records every processed (name, url) pair on the owning test case's
    `processed` queue so tests can assert on processing order.
    """

    def __init__(self, unit_test, name, url):
        self.unit_test = unit_test
        self.name = name
        self.url = url

    def RunCommand(self, command, options, args, file_list):
        # The tests always dispatch the 'None' pseudo-command.
        self.unit_test.assertEquals('None', command)
        self.unit_test.processed.put((self.name, self.url))

    def FullUrlForRelativeUrl(self, url):
        return '%s%s' % (self.url, url)

    # pylint: disable=R0201
    def DoesRemoteURLMatch(self, _):
        return True

    def GetActualRemoteURL(self, _):
        return self.url
class GclientTest(trial_dir.TestCase):
def setUp(self):
super(GclientTest, self).setUp()
self.processed = Queue.Queue()
self.previous_dir = os.getcwd()
os.chdir(self.root_dir)
# Manual mocks.
self._old_createscm = gclient.gclient_scm.CreateSCM
gclient.gclient_scm.CreateSCM = self._createscm
self._old_sys_stdout = sys.stdout
sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)
def tearDown(self):
self.assertEquals([], self._get_processed())
gclient.gclient_scm.CreateSCM = self._old_createscm
sys.stdout = self._old_sys_stdout
os.chdir(self.previous_dir)
super(GclientTest, self).tearDown()
def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
return SCMMock(self, name, parsed_url)
def testDependencies(self):
self._dependencies('1')
def testDependenciesJobs(self):
self._dependencies('1000')
def _dependencies(self, jobs):
"""Verifies that dependencies are processed in the right order.
e.g. if there is a dependency 'src' and another 'src/third_party/bar', that
bar isn't fetched until 'src' is done.
Also test that a From() dependency should not be processed when it is listed
as a requirement.
Args:
|jobs| is the number of parallel jobs simulated.
"""
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', jobs])
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "bar", "url": "svn://example.com/bar" },\n'
' { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",\n'
# This one will depend on dir1/dir2 in bar.
' "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
' "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
' "foo/dir1/dir2/dir5/dir6":\n'
' From("foo/dir1/dir2/dir3/dir4", "foo/dir1/dir2"),\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
# There is two foo/dir1/dir2. This one is fetched as bar/dir1/dir2.
' "foo/dir1/dir2": "/dir1/dir2",\n'
'}')
write(
os.path.join('bar/empty', 'DEPS'),
'deps = {\n'
'}')
# Test From()
write(
os.path.join('foo/dir1/dir2/dir3/dir4', 'DEPS'),
'deps = {\n'
# This one should not be fetched or set as a requirement.
' "foo/dir1/dir2/dir5": "svn://example.com/x",\n'
# This foo/dir1/dir2 points to a different url than the one in bar.
' "foo/dir1/dir2": "/dir1/another",\n'
'}')
obj = gclient.GClient.LoadCurrentConfig(options)
self._check_requirements(obj.dependencies[0], {})
self._check_requirements(obj.dependencies[1], {})
obj.RunOnDeps('None', args)
actual = self._get_processed()
first_3 = [
('bar', 'svn://example.com/bar'),
('bar/empty', 'svn://example.com/bar_empty'),
('foo', 'svn://example.com/foo'),
]
if jobs != 1:
# We don't care of the ordering of these items except that bar must be
# before bar/empty.
self.assertTrue(
actual.index(('bar', 'svn://example.com/bar')) <
actual.index(('bar/empty', 'svn://example.com/bar_empty')))
self.assertEquals(first_3, sorted(actual[0:3]))
else:
self.assertEquals(first_3, actual[0:3])
self.assertEquals(
[
('foo/dir1', 'svn://example.com/foo/dir1'),
('foo/dir1/dir2', 'svn://example.com/bar/dir1/dir2'),
('foo/dir1/dir2/dir3', 'svn://example.com/foo/dir1/dir2/dir3'),
('foo/dir1/dir2/dir3/dir4',
'svn://example.com/foo/dir1/dir2/dir3/dir4'),
('foo/dir1/dir2/dir5/dir6',
'svn://example.com/foo/dir1/dir2/dir3/dir4/dir1/another'),
],
actual[3:])
self.assertEquals(3, len(obj.dependencies))
self.assertEquals('foo', obj.dependencies[0].name)
self.assertEquals('bar', obj.dependencies[1].name)
self.assertEquals('bar/empty', obj.dependencies[2].name)
self._check_requirements(
obj.dependencies[0],
{
'foo/dir1': ['bar', 'bar/empty', 'foo'],
'foo/dir1/dir2/dir3':
['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
'foo/dir1/dir2/dir3/dir4':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3'],
'foo/dir1/dir2/dir5/dir6':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3/dir4'],
})
self._check_requirements(
obj.dependencies[1],
{
'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
})
self._check_requirements(
obj.dependencies[2],
{})
self._check_requirements(
obj,
{
'foo': [],
'bar': [],
'bar/empty': ['bar'],
})
def _check_requirements(self, solution, expected):
for dependency in solution.dependencies:
e = expected.pop(dependency.name)
a = sorted(dependency.requirements)
self.assertEquals(e, a, (dependency.name, e, a))
self.assertEquals({}, expected)
def _get_processed(self):
"""Retrieves the item in the order they were processed."""
items = []
try:
while True:
items.append(self.processed.get_nowait())
except Queue.Empty:
pass
return items
def testAutofix(self):
# Invalid urls causes pain when specifying requirements. Make sure it's
# auto-fixed.
d = gclient.Dependency(
None, 'name', 'proto://host/path/@revision', None, None, None, None,
None, '', True, False)
self.assertEquals('proto://host/path@revision', d.url)
def testStr(self):
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
obj = gclient.GClient('foo', options)
obj.add_dependencies_and_close(
[
gclient.Dependency(
obj, 'foo', 'url', None, None, None, None, None, 'DEPS', True, False),
gclient.Dependency(
obj, 'bar', 'url', None, None, None, None, None, 'DEPS', True, False),
],
[])
obj.dependencies[0].add_dependencies_and_close(
[
gclient.Dependency(
obj.dependencies[0], 'foo/dir1', 'url', None, None, None, None,
None, 'DEPS', True, False),
gclient.Dependency(
obj.dependencies[0], 'foo/dir2',
gclient.GClientKeywords.FromImpl('bar'), None, None, None, None,
None, 'DEPS', True, False),
gclient.Dependency(
obj.dependencies[0], 'foo/dir3',
gclient.GClientKeywords.FileImpl('url'), None, None, None, None,
None, 'DEPS', True, False),
],
[])
# Make sure __str__() works fine.
# pylint: disable=W0212
obj.dependencies[0]._file_list.append('foo')
str_obj = str(obj)
self.assertEquals(471, len(str_obj), '%d\n%s' % (len(str_obj), str_obj))
def testHooks(self):
  """Verifies that hooks declared in a solution's DEPS show up in GetHooks()."""
  topdir = self.root_dir
  gclient_fn = os.path.join(topdir, '.gclient')
  fh = open(gclient_fn, 'w')
  print >> fh, 'solutions = [{"name":"top","url":"svn://example.com/top"}]'
  fh.close()
  subdir_fn = os.path.join(topdir, 'top')
  os.mkdir(subdir_fn)
  deps_fn = os.path.join(subdir_fn, 'DEPS')
  fh = open(deps_fn, 'w')
  hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
  print >> fh, 'hooks = %s' % repr(hooks)
  fh.close()
  # A dummy file so the checkout is non-empty.
  fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
  print >> fh, 'bogus content'
  fh.close()
  os.chdir(topdir)
  parser = gclient.OptionParser()
  options, _ = parser.parse_args([])
  options.force = True
  client = gclient.GClient.LoadCurrentConfig(options)
  work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
  for s in client.dependencies:
    work_queue.enqueue(s)
  work_queue.flush({}, None, [], options=options)
  # GetHooks() must return exactly the actions declared in DEPS.
  self.assertEqual(client.GetHooks(options), [x['action'] for x in hooks])
def testCustomHooks(self):
  """Verifies custom_hooks: per-solution named hooks can be appended or
  suppressed, without leaking into other solutions."""
  topdir = self.root_dir
  gclient_fn = os.path.join(topdir, '.gclient')
  fh = open(gclient_fn, 'w')
  extra_hooks = [{'name': 'append', 'pattern':'.', 'action':['supercmd']}]
  print >> fh, ('solutions = [{"name":"top","url":"svn://example.com/top",'
      '"custom_hooks": %s},' ) % repr(extra_hooks + [{'name': 'skip'}])
  print >> fh, '{"name":"bottom","url":"svn://example.com/bottom"}]'
  fh.close()
  subdir_fn = os.path.join(topdir, 'top')
  os.mkdir(subdir_fn)
  deps_fn = os.path.join(subdir_fn, 'DEPS')
  fh = open(deps_fn, 'w')
  hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
  hooks.append({'pattern':'.', 'action':['cmd2', 'arg1', 'arg2']})
  # These are named 'skip' and should be dropped by the custom_hooks above.
  skip_hooks = [
      {'name': 'skip', 'pattern':'.', 'action':['cmd3', 'arg1', 'arg2']}]
  skip_hooks.append(
      {'name': 'skip', 'pattern':'.', 'action':['cmd4', 'arg1', 'arg2']})
  print >> fh, 'hooks = %s' % repr(hooks + skip_hooks)
  fh.close()
  # Make sure the custom hooks for that project don't affect the next one.
  subdir_fn = os.path.join(topdir, 'bottom')
  os.mkdir(subdir_fn)
  deps_fn = os.path.join(subdir_fn, 'DEPS')
  fh = open(deps_fn, 'w')
  sub_hooks = [{'pattern':'.', 'action':['response1', 'yes1', 'yes2']}]
  sub_hooks.append(
      {'name': 'skip', 'pattern':'.', 'action':['response2', 'yes', 'sir']})
  print >> fh, 'hooks = %s' % repr(sub_hooks)
  fh.close()
  fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
  print >> fh, 'bogus content'
  fh.close()
  os.chdir(topdir)
  parser = gclient.OptionParser()
  options, _ = parser.parse_args([])
  options.force = True
  client = gclient.GClient.LoadCurrentConfig(options)
  work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
  for s in client.dependencies:
    work_queue.enqueue(s)
  work_queue.flush({}, None, [], options=options)
  # 'skip' hooks are gone; 'append' hooks come after DEPS hooks; bottom's
  # hooks (including its own 'skip'-named one) are untouched.
  self.assertEqual(client.GetHooks(options),
      [x['action'] for x in hooks + extra_hooks + sub_hooks])
def testTargetOS(self):
  """Verifies that specifying a target_os pulls in all relevant dependencies.

  The target_os variable allows specifying the name of an additional OS which
  should be considered when selecting dependencies from a DEPS' deps_os. The
  value will be appended to the _enforced_os tuple.
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo",\n'
      ' "url": "svn://example.com/foo",\n'
      ' }]\n'
      'target_os = ["baz"]')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "foo/dir1": "/dir1",'
      '}\n'
      'deps_os = {\n'
      ' "unix": { "foo/dir2": "/dir2", },\n'
      ' "baz": { "foo/dir3": "/dir3", },\n'
      '}')
  parser = gclient.OptionParser()
  options, _ = parser.parse_args(['--jobs', '1'])
  options.deps_os = "unix"
  obj = gclient.GClient.LoadCurrentConfig(options)
  # Both the command-line OS ("unix") and the .gclient target_os ("baz")
  # must end up in enforced_os.
  self.assertEqual(['baz', 'unix'], sorted(obj.enforced_os))
def testTargetOsWithTargetOsOnly(self):
  """Verifies that specifying a target_os and target_os_only pulls in only
  the relevant dependencies.

  The target_os variable allows specifying the name of an additional OS which
  should be considered when selecting dependencies from a DEPS' deps_os. With
  target_os_only also set, the _enforced_os tuple will be set to only the
  target_os value.
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo",\n'
      ' "url": "svn://example.com/foo",\n'
      ' }]\n'
      'target_os = ["baz"]\n'
      'target_os_only = True')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "foo/dir1": "/dir1",'
      '}\n'
      'deps_os = {\n'
      ' "unix": { "foo/dir2": "/dir2", },\n'
      ' "baz": { "foo/dir3": "/dir3", },\n'
      '}')
  parser = gclient.OptionParser()
  options, _ = parser.parse_args(['--jobs', '1'])
  options.deps_os = "unix"
  obj = gclient.GClient.LoadCurrentConfig(options)
  # Only the explicit target_os survives; the command-line OS is dropped.
  self.assertEqual(['baz'], sorted(obj.enforced_os))
def testTargetOsOnlyWithoutTargetOs(self):
  """Verifies that specifying target_os_only without target_os raises
  an exception.
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo",\n'
      ' "url": "svn://example.com/foo",\n'
      ' }]\n'
      'target_os_only = True')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "foo/dir1": "/dir1",'
      '}\n'
      'deps_os = {\n'
      ' "unix": { "foo/dir2": "/dir2", },\n'
      '}')
  parser = gclient.OptionParser()
  options, _ = parser.parse_args(['--jobs', '1'])
  options.deps_os = "unix"
  exception_raised = False
  try:
    gclient.GClient.LoadCurrentConfig(options)
  except gclient_utils.Error:
    exception_raised = True
  # target_os_only is meaningless without target_os, so loading must fail.
  self.assertTrue(exception_raised)
def testTargetOsInDepsFile(self):
  """Verifies that specifying a target_os value in a DEPS file pulls in all
  relevant dependencies.

  The target_os variable in a DEPS file allows specifying the name of an
  additional OS which should be considered when selecting dependencies from a
  DEPS' deps_os. The value will be appended to the _enforced_os tuple.
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo",\n'
      ' "url": "svn://example.com/foo",\n'
      ' },\n'
      ' { "name": "bar",\n'
      ' "url": "svn://example.com/bar",\n'
      ' }]\n')
  # Only foo declares target_os = ["baz"]; bar should not pick up baz deps.
  write(
      os.path.join('foo', 'DEPS'),
      'target_os = ["baz"]\n'
      'deps_os = {\n'
      ' "unix": { "foo/unix": "/unix", },\n'
      ' "baz": { "foo/baz": "/baz", },\n'
      ' "jaz": { "foo/jaz": "/jaz", },\n'
      '}')
  write(
      os.path.join('bar', 'DEPS'),
      'deps_os = {\n'
      ' "unix": { "bar/unix": "/unix", },\n'
      ' "baz": { "bar/baz": "/baz", },\n'
      ' "jaz": { "bar/jaz": "/jaz", },\n'
      '}')
  parser = gclient.OptionParser()
  options, _ = parser.parse_args(['--jobs', '1'])
  options.deps_os = 'unix'
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEqual(['unix'], sorted(obj.enforced_os))
  # foo pulls unix+baz; bar pulls only unix; no jaz deps anywhere.
  self.assertEquals(
      [
        ('bar', 'svn://example.com/bar'),
        ('bar/unix', 'svn://example.com/bar/unix'),
        ('foo', 'svn://example.com/foo'),
        ('foo/baz', 'svn://example.com/foo/baz'),
        ('foo/unix', 'svn://example.com/foo/unix'),
      ],
      sorted(self._get_processed()))
def testUpdateWithOsDeps(self):
  """Verifies that complicated deps_os constructs result in the
  correct data also with multple operating systems. Also see
  testDepsOsOverrideDepsInDepsFile."""
  test_data = [
      # Tuples of deps, deps_os, os_list and expected_deps.
      (
        # OS doesn't need module.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None } },
        ['os1'],
        {'foo': None}
      ),
      (
        # OS wants a different version of module.
        {'foo': 'default_foo'},
        {'os1': { 'foo': 'os1_foo'} },
        ['os1'],
        {'foo': 'os1_foo'}
      ),
      (
        # OS with no overrides at all.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None } },
        ['os2'],
        {'foo': 'default_foo'}
      ),
      (
        # One OS doesn't need module, one OS wants the default.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None },
         'os2': {}},
        ['os1', 'os2'],
        {'foo': 'default_foo'}
      ),
      (
        # One OS doesn't need module, another OS wants a special version.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None },
         'os2': { 'foo': 'os2_foo'}},
        ['os1', 'os2'],
        {'foo': 'os2_foo'}
      ),
      (
        # One OS wants to add a module.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os1_bar' }},
        ['os1'],
        {'foo': 'default_foo',
         'bar': 'os1_bar'}
      ),
      (
        # One OS wants to add a module. One doesn't care.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os1_bar' }},
        ['os1', 'os2'],
        {'foo': 'default_foo',
         'bar': 'os1_bar'}
      ),
      (
        # Two OSes want to add a module with the same definition.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os12_bar' },
         'os2': { 'bar': 'os12_bar' }},
        ['os1', 'os2'],
        {'foo': 'default_foo',
         'bar': 'os12_bar'}
      ),
      ]
  for deps, deps_os, target_os_list, expected_deps in test_data:
    orig_deps = copy.deepcopy(deps)
    result = gclient.Dependency.MergeWithOsDeps(deps, deps_os, target_os_list)
    self.assertEqual(result, expected_deps)
    # MergeWithOsDeps must not mutate its input dict.
    self.assertEqual(deps, orig_deps)
def testLateOverride(self):
  """LateOverride() must pass a plain git URL through unchanged."""
  original_url = "git@github.com:dart-lang/spark.git"
  dep = gclient.Dependency(None, 'name', 'url',
                           None, None, None, None, None, '', True, False)
  self.assertEquals(original_url, dep.LateOverride(original_url))
def testDepsOsOverrideDepsInDepsFile(self):
  """Verifies that a 'deps_os' path can override a 'deps' path. Also
  see testUpdateWithOsDeps above.
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo",\n'
      ' "url": "svn://example.com/foo",\n'
      ' },]\n')
  write(
      os.path.join('foo', 'DEPS'),
      'target_os = ["baz"]\n'
      'deps = {\n'
      ' "foo/src": "/src",\n' # This path is to be overridden by similar path
                              # in deps_os['unix'].
      '}\n'
      'deps_os = {\n'
      ' "unix": { "foo/unix": "/unix",'
      ' "foo/src": "/src_unix"},\n'
      ' "baz": { "foo/baz": "/baz",\n'
      ' "foo/src": None},\n'
      ' "jaz": { "foo/jaz": "/jaz", },\n'
      '}')
  parser = gclient.OptionParser()
  options, _ = parser.parse_args(['--jobs', '1'])
  options.deps_os = 'unix'
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEqual(['unix'], sorted(obj.enforced_os))
  # foo/src resolves to the unix-specific /src_unix, not the default /src.
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/baz', 'svn://example.com/foo/baz'),
        ('foo/src', 'svn://example.com/foo/src_unix'),
        ('foo/unix', 'svn://example.com/foo/unix'),
      ],
      sorted(self._get_processed()))
def testRecursionOverride(self):
  """Verifies gclient respects the |recursion| var syntax.

  We check several things here:
  - |recursion| = 3 sets recursion on the foo dep to exactly 3
    (we pull /fizz, but not /fuzz)
  - pulling foo/bar at recursion level 1 (in .gclient) is overridden by
    a later pull of foo/bar at recursion level 2 (in the dep tree)
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursion = 3')
  write(
      os.path.join('bar', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  # fizz's dep on fuzz is one level past the recursion limit of 3.
  write(
      os.path.join('fizz', 'DEPS'),
      'deps = {\n'
      ' "fuzz": "/fuzz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/bar'),
        ('bar', 'svn://example.com/foo/bar'),
        ('baz', 'svn://example.com/foo/bar/baz'),
        ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
      ],
      self._get_processed())
def testRecursedepsOverride(self):
  """Verifies gclient respects the |recursedeps| var syntax.

  This is what we mean to check here:
  - |recursedeps| = [...] on 2 levels means we pull exactly 3 deps
    (up to /fizz, but not /fuzz)
  - pulling foo/bar with no recursion (in .gclient) is overridden by
    a later pull of foo/bar with recursion (in the dep tree)
  - pulling foo/tar with no recursion (in .gclient) is not recursively
    pulled (taz is left out)
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
      ' { "name": "foo/tar", "url": "svn://example.com/tar" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursedeps = ["bar"]')
  write(
      os.path.join('bar', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}\n'
      'recursedeps = ["baz"]')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  # fizz is not in anyone's recursedeps, so fuzz must not be pulled.
  write(
      os.path.join('fizz', 'DEPS'),
      'deps = {\n'
      ' "fuzz": "/fuzz",\n'
      '}')
  # tar is not in foo's recursedeps, so taz must not be pulled.
  write(
      os.path.join('tar', 'DEPS'),
      'deps = {\n'
      ' "taz": "/taz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('bar', 'svn://example.com/foo/bar'),
        ('baz', 'svn://example.com/foo/bar/baz'),
        ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/bar'),
        ('foo/tar', 'svn://example.com/tar'),
      ],
      sorted(self._get_processed()))
def testRecursedepsOverrideWithRelativePaths(self):
  """Verifies gclient respects |recursedeps| with relative paths."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ']')
  # use_relative_paths makes bar check out under foo/ rather than at the root.
  write(
      os.path.join('foo', 'DEPS'),
      'use_relative_paths = True\n'
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursedeps = ["bar"]')
  write(
      os.path.join('foo/bar', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  # baz lands under foo/ because only foo sets use_relative_paths.
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/foo/bar'),
        ('foo/baz', 'svn://example.com/foo/bar/baz'),
      ],
      self._get_processed())
def testRelativeRecursion(self):
  """Verifies that nested use_relative_paths is always respected."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'use_relative_paths = True\n'
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursedeps = ["bar"]')
  # bar ALSO sets use_relative_paths, so its deps nest one level deeper.
  write(
      os.path.join('foo/bar', 'DEPS'),
      'use_relative_paths = True\n'
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  # Contrast with testRecursedepsOverrideWithRelativePaths: baz ends up at
  # foo/bar/baz here because of the nested use_relative_paths.
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/foo/bar'),
        ('foo/bar/baz', 'svn://example.com/foo/bar/baz'),
      ],
      self._get_processed())
def testRecursionOverridesRecursedeps(self):
  """Verifies gclient respects |recursion| over |recursedeps|.

  |recursion| is set in a top-level DEPS file. That value is meant
  to affect how many subdeps are parsed via recursion.

  |recursedeps| is set in each DEPS file to control whether or not
  to recurse into the immediate next subdep.

  This test verifies that if both syntaxes are mixed in a DEPS file,
  we disable |recursedeps| support and only obey |recursion|.

  Since this setting is evaluated per DEPS file, recursed DEPS
  files will each be re-evaluated according to the per DEPS rules.
  So a DEPS that only contains |recursedeps| could then override any
  previous |recursion| setting. There is extra processing to ensure
  this does not happen.

  For this test to work correctly, we need to use a DEPS chain that
  only contains recursion controls in the top DEPS file.

  In foo, |recursion| and |recursedeps| are specified. When we see
  |recursion|, we stop trying to use |recursedeps|.

  There are 2 constructions of DEPS here that are key to this test:

  (1) In foo, if we used |recursedeps| instead of |recursion|, we
      would also pull in bar. Since bar's DEPS doesn't contain any
      recursion statements, we would stop processing at bar.
  (2) In fizz, if we used |recursedeps| at all, we should pull in
      fuzz.

  We expect to keep going past bar (satisfying 1) and we don't
  expect to pull in fuzz (satisfying 2).
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursion = 3\n'
      'recursedeps = ["bar"]')
  write(
      os.path.join('bar', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  write(
      os.path.join('fizz', 'DEPS'),
      'deps = {\n'
      ' "fuzz": "/fuzz",\n'
      '}\n'
      'recursedeps = ["fuzz"]')
  write(
      os.path.join('fuzz', 'DEPS'),
      'deps = {\n'
      ' "tar": "/tar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/bar'),
        ('bar', 'svn://example.com/foo/bar'),
        # Deps after this would have been skipped if we were obeying
        # |recursedeps|.
        ('baz', 'svn://example.com/foo/bar/baz'),
        ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
        # And this dep would have been picked up if we were obeying
        # |recursedeps|.
        # 'svn://example.com/foo/bar/baz/fuzz',
      ],
      self._get_processed())
def testRecursedepsAltfile(self):
  """Verifies gclient respects the |recursedeps| var syntax with overridden
  target DEPS file.

  This is what we mean to check here:
  - Naming an alternate DEPS file in recursedeps pulls from that one.
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursedeps = [("bar", "DEPS.alt")]')
  # Sentinel: if gclient ever reads bar/DEPS instead of bar/DEPS.alt,
  # parsing this garbage will fail loudly.
  write(os.path.join('bar', 'DEPS'), 'ERROR ERROR ERROR')
  write(
      os.path.join('bar', 'DEPS.alt'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('bar', 'svn://example.com/foo/bar'),
        ('baz', 'svn://example.com/foo/bar/baz'),
      ],
      self._get_processed())
def testGitDeps(self):
  """Verifies gclient respects a .DEPS.git deps file.

  Along the way, we also test that if both DEPS and .DEPS.git are present,
  that gclient does not read the DEPS file. This will reliably catch bugs
  where gclient is always hitting the wrong file (DEPS).
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', '.DEPS.git'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  # Decoy: this DEPS declares baz; baz showing up means the wrong file was read.
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('bar', 'svn://example.com/foo/bar'),
      ],
      self._get_processed())
def testGitDepsFallback(self):
  """Verifies gclient respects fallback to DEPS upon missing deps file."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  # No .DEPS.git is written, so gclient must fall back to plain DEPS.
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('bar', 'svn://example.com/foo/bar'),
      ],
      self._get_processed())
def testDepsFromNotAllowedHostsUnspecified(self):
  """Verifies gclient works fine with DEPS without allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  # Written as DEPS (not .DEPS.git), relying on the DEPS fallback.
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  dep = obj.dependencies[0]
  # With no allowed_hosts declared, nothing is flagged and the set is empty.
  self.assertEquals([], dep.findDepsFromNotAllowedHosts())
  self.assertEquals(frozenset(), dep.allowed_hosts)
  self._get_processed()
def testDepsFromNotAllowedHostsOK(self):
  """Verifies gclient works fine with DEPS with proper allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', '.DEPS.git'),
      'allowed_hosts = ["example.com"]\n'
      'deps = {\n'
      ' "bar": "svn://example.com/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  dep = obj.dependencies[0]
  # bar's host matches allowed_hosts, so nothing is flagged.
  self.assertEquals([], dep.findDepsFromNotAllowedHosts())
  self.assertEquals(frozenset(['example.com']), dep.allowed_hosts)
  self._get_processed()
def testDepsFromNotAllowedHostsBad(self):
  """Verifies findDepsFromNotAllowedHosts() flags deps whose host is not
  listed in allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  # allowed_hosts only lists other.com, but bar comes from example.com.
  write(
      os.path.join('foo', '.DEPS.git'),
      'allowed_hosts = ["other.com"]\n'
      'deps = {\n'
      ' "bar": "svn://example.com/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  dep = obj.dependencies[0]
  self.assertEquals(frozenset(['other.com']), dep.allowed_hosts)
  # The mismatching bar dep must be reported.
  self.assertEquals([dep.dependencies[0]], dep.findDepsFromNotAllowedHosts())
  self._get_processed()
def testDepsParseFailureWithEmptyAllowedHosts(self):
  """Verifies gclient fails with defined but empty allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'allowed_hosts = []\n'
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  try:
    obj.RunOnDeps('None', [])
    self.fail()
  except gclient_utils.Error, e:
    # An empty list is treated as a configuration error, not "allow nothing".
    self.assertIn('allowed_hosts must be', str(e))
  finally:
    # Always drain the processed queue so later tests start clean.
    self._get_processed()
def testDepsParseFailureWithNonIterableAllowedHosts(self):
  """Verifies gclient fails with defined but non-iterable allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'allowed_hosts = None\n'
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  try:
    obj.RunOnDeps('None', [])
    self.fail()
  except gclient_utils.Error, e:
    self.assertIn('allowed_hosts must be', str(e))
  finally:
    # Always drain the processed queue so later tests start clean.
    self._get_processed()
if __name__ == '__main__':
  # Auto-flush and annotate stdout/stderr so output from the worker threads
  # spawned by ExecutionQueue stays readable when interleaved.
  sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
  sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout, include_zero=True)
  sys.stderr = gclient_utils.MakeFileAutoFlush(sys.stderr)
  sys.stderr = gclient_utils.MakeFileAnnotated(sys.stderr, include_zero=True)
  # Each -v on the command line raises verbosity one level, capped at DEBUG.
  logging.basicConfig(
      level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][
          min(sys.argv.count('-v'), 3)],
      format='%(relativeCreated)4d %(levelname)5s %(module)13s('
          '%(lineno)d) %(message)s')
  unittest.main()
| primiano/depot_tools | tests/gclient_test.py | Python | bsd-3-clause | 36,329 | 0.003854 |
"""
Tests for classes extending Field.
"""
# Allow accessing protected members for testing purposes
# pylint: disable=W0212
from mock import Mock
import unittest
import datetime as dt
import pytz
import warnings
import math
import textwrap
import itertools
from contextlib import contextmanager
import ddt
from xblock.core import XBlock, Scope
from xblock.field_data import DictFieldData
from xblock.fields import (
Any, Boolean, Dict, Field, Float,
Integer, List, String, DateTime, Reference, ReferenceList, Sentinel,
UNIQUE_ID
)
from xblock.test.tools import (
assert_equals, assert_not_equals, assert_in, assert_not_in, assert_false, TestRuntime
)
from xblock.fields import scope_key, ScopeIds
class FieldTest(unittest.TestCase):
    """ Base test class for Fields. """

    # Subclasses override this with the Field subclass under test.
    FIELD_TO_TEST = Mock()

    def set_and_get_field(self, arg, enforce_type):
        """
        Set the field to arg in a Block, get it and return it
        """
        class TestBlock(XBlock):
            """
            Block for testing
            """
            field_x = self.FIELD_TO_TEST(enforce_type=enforce_type)

        runtime = TestRuntime(services={'field-data': DictFieldData({})})
        block = TestBlock(runtime, scope_ids=Mock(spec=ScopeIds))
        block.field_x = arg
        return block.field_x

    @contextmanager
    def assertDeprecationWarning(self, count=1):
        """Asserts that the contained code raises `count` deprecation warnings"""
        with warnings.catch_warnings(record=True) as caught:
            # "always" keeps repeated warnings from being de-duplicated.
            warnings.simplefilter("always", DeprecationWarning)
            yield
        self.assertEquals(count, sum(
            1 for warning in caught
            if issubclass(warning.category, DeprecationWarning)
        ))

    def assertJSONOrSetEquals(self, expected, arg):
        """
        Asserts the result of field.from_json and of setting field.
        """
        # from_json(arg) -> expected
        self.assertEqual(expected, self.FIELD_TO_TEST().from_json(arg))
        # set+get with enforce_type arg -> expected
        self.assertEqual(expected, self.set_and_get_field(arg, True))
        # set+get without enforce_type arg -> arg
        # provoking a warning unless arg == expected
        count = 0 if arg == expected else 1
        with self.assertDeprecationWarning(count):
            self.assertEqual(arg, self.set_and_get_field(arg, False))

    def assertToJSONEquals(self, expected, arg):
        """
        Assert that serialization of `arg` to JSON equals `expected`.
        """
        self.assertEqual(expected, self.FIELD_TO_TEST().to_json(arg))

    def assertJSONOrSetValueError(self, arg):
        """
        Asserts that field.from_json or setting the field throws a ValueError
        for the supplied value.
        """
        # from_json and set+get with enforce_type -> ValueError
        with self.assertRaises(ValueError):
            self.FIELD_TO_TEST().from_json(arg)
        with self.assertRaises(ValueError):
            self.set_and_get_field(arg, True)
        # set+get without enforce_type -> warning
        with self.assertDeprecationWarning():
            self.set_and_get_field(arg, False)

    def assertJSONOrSetTypeError(self, arg):
        """
        Asserts that field.from_json or setting the field throws a TypeError
        for the supplied value.
        """
        # from_json and set+get with enforce_type -> TypeError
        with self.assertRaises(TypeError):
            self.FIELD_TO_TEST().from_json(arg)
        with self.assertRaises(TypeError):
            self.set_and_get_field(arg, True)
        # set+get without enforce_type -> warning
        with self.assertDeprecationWarning():
            self.set_and_get_field(arg, False)
class IntegerTest(FieldTest):
    """
    Exercise JSON conversion and type enforcement for the Integer field.
    """
    FIELD_TO_TEST = Integer

    def test_integer(self):
        # (expected, raw) pairs: numeric strings, ints and bools coerce to int.
        for expected, raw in ((5, '5'), (0, '0'), (-1023, '-1023'),
                              (7, 7), (0, False), (1, True)):
            self.assertJSONOrSetEquals(expected, raw)

    def test_float_converts(self):
        # Floats are truncated toward zero.
        self.assertJSONOrSetEquals(1, 1.023)
        self.assertJSONOrSetEquals(-3, -3.8)

    def test_none(self):
        # None and the empty string both map to None.
        for raw in (None, ''):
            self.assertJSONOrSetEquals(None, raw)

    def test_error(self):
        # Unparseable strings raise ValueError ...
        for bad in ('abc', '[1]', '1.023'):
            self.assertJSONOrSetValueError(bad)
        # ... while containers raise TypeError.
        for bad in ([], {}):
            self.assertJSONOrSetTypeError(bad)
class FloatTest(FieldTest):
    """
    Exercise JSON conversion and type enforcement for the Float field.
    """
    FIELD_TO_TEST = Float

    def test_float(self):
        # (expected, raw) pairs: strings, numbers and bools coerce to float.
        for expected, raw in ((.23, '.23'), (5, '5'), (0, '0.0'),
                              (-1023.22, '-1023.22'), (0, 0.0), (4, 4),
                              (-0.23, -0.23), (0, False), (1, True)):
            self.assertJSONOrSetEquals(expected, raw)

    def test_none(self):
        # None and the empty string both map to None.
        for raw in (None, ''):
            self.assertJSONOrSetEquals(None, raw)

    def test_error(self):
        # Unparseable strings raise ValueError ...
        for bad in ('abc', '[1]'):
            self.assertJSONOrSetValueError(bad)
        # ... while containers raise TypeError.
        for bad in ([], {}):
            self.assertJSONOrSetTypeError(bad)
class BooleanTest(FieldTest):
    """
    Exercise JSON conversion and type enforcement for the Boolean field.
    """
    FIELD_TO_TEST = Boolean

    def test_false(self):
        # Any string other than (case-insensitive) "true" is False.
        for raw in ("false", "False", "", "any other string", False):
            self.assertJSONOrSetEquals(False, raw)

    def test_true(self):
        # "true" is matched case-insensitively.
        for raw in ("true", "TruE", True):
            self.assertJSONOrSetEquals(True, raw)

    def test_none(self):
        # None is coerced to False rather than preserved.
        self.assertJSONOrSetEquals(False, None)

    def test_everything_converts_to_bool(self):
        # Non-string values fall back to Python truthiness.
        self.assertJSONOrSetEquals(True, 123)
        self.assertJSONOrSetEquals(True, ['a'])
        self.assertJSONOrSetEquals(False, [])
class StringTest(FieldTest):
    """
    Exercise JSON conversion and type enforcement for the String field.
    """
    FIELD_TO_TEST = String

    def test_json_equals(self):
        # Strings round-trip unchanged, embedded quoting included.
        for value in ("false", "abba", '"abba"', ''):
            self.assertJSONOrSetEquals(value, value)

    def test_none(self):
        self.assertJSONOrSetEquals(None, None)

    def test_error(self):
        # Anything that is not a string (or None) is rejected.
        for bad in (['a'], 1.023, 3, [1], [], {}):
            self.assertJSONOrSetTypeError(bad)
class DateTest(FieldTest):
    """
    Tests of the Date field.
    """
    FIELD_TO_TEST = DateTime

    def test_json_equals(self):
        # ISO strings (with or without microseconds / trailing 'Z') parse to
        # UTC-aware datetimes; aware datetimes pass through unchanged.
        self.assertJSONOrSetEquals(
            dt.datetime(2014, 4, 1, 2, 3, 4, 567890).replace(tzinfo=pytz.utc),
            '2014-04-01T02:03:04.567890'
        )
        self.assertJSONOrSetEquals(
            dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc),
            '2014-04-01T02:03:04.000000'
        )
        self.assertJSONOrSetEquals(
            dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc),
            '2014-04-01T02:03:04Z'
        )
        self.assertJSONOrSetEquals(
            dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc),
            dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc)
        )

    def test_serialize(self):
        # to_json always emits microseconds, even when they are zero.
        self.assertToJSONEquals(
            '2014-04-01T02:03:04.567890',
            dt.datetime(2014, 4, 1, 2, 3, 4, 567890).replace(tzinfo=pytz.utc)
        )
        self.assertToJSONEquals(
            '2014-04-01T02:03:04.000000',
            dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc)
        )

    def test_none(self):
        self.assertJSONOrSetEquals(None, None)
        self.assertJSONOrSetEquals(None, '')
        self.assertEqual(DateTime().to_json(None), None)

    def test_error(self):
        self.assertJSONOrSetTypeError(['a'])
        self.assertJSONOrSetTypeError(5)
        self.assertJSONOrSetTypeError(5.123)

    def test_date_format_error(self):
        # A string that is not ISO-formatted raises ValueError.
        with self.assertRaises(ValueError):
            DateTime().from_json('invalid')

    def test_serialize_error(self):
        # Only datetime objects (or None) can be serialized.
        with self.assertRaises(TypeError):
            DateTime().to_json('not a datetime')
class AnyTest(FieldTest):
    """
    Exercise the Any field, which accepts every value unchanged.
    """
    FIELD_TO_TEST = Any

    def test_json_equals(self):
        # Sets, strings, bools and lists all round-trip as-is.
        for value in ({'bar'}, "abba", '', '3.2', False, [3, 4]):
            self.assertJSONOrSetEquals(value, value)

    def test_none(self):
        self.assertJSONOrSetEquals(None, None)
class ListTest(FieldTest):
    """
    Exercise JSON conversion and type enforcement for the List field.
    """
    FIELD_TO_TEST = List

    def test_json_equals(self):
        # Lists round-trip unchanged.
        for value in ([], ['foo', 'bar'], [1, 3.4]):
            self.assertJSONOrSetEquals(value, value)

    def test_none(self):
        self.assertJSONOrSetEquals(None, None)

    def test_error(self):
        # Strings, numbers, bools and dicts are all rejected.
        for bad in ('abc', '', '1.23', 'true', 3.7, True, {}):
            self.assertJSONOrSetTypeError(bad)
class ReferenceTest(FieldTest):
    """
    Exercise the Reference field, which passes values through unchanged.
    """
    FIELD_TO_TEST = Reference

    def test_json_equals(self):
        # Dicts, usage-id strings, numbers, bools and lists all round-trip.
        for value in ({'id': 'bar', 'usage': 'baz'},
                      "i4x://myu/mycourse/problem/myproblem",
                      '', 3.2, False, [3, 4]):
            self.assertJSONOrSetEquals(value, value)

    def test_none(self):
        self.assertJSONOrSetEquals(None, None)
class ReferenceListTest(FieldTest):
    """
    Exercise JSON conversion and type enforcement for the ReferenceList field.
    """
    FIELD_TO_TEST = ReferenceList

    def test_json_equals(self):
        # Like List: lists round-trip unchanged.
        for value in ([], ['foo', 'bar'], [1, 3.4]):
            self.assertJSONOrSetEquals(value, value)

    def test_none(self):
        self.assertJSONOrSetEquals(None, None)

    def test_error(self):
        # Strings, numbers, bools and dicts are all rejected.
        for bad in ('abc', '', '1.23', 'true', 3.7, True, {}):
            self.assertJSONOrSetTypeError(bad)
class DictTest(FieldTest):
    """
    Exercise JSON conversion and type enforcement for the Dict field.
    """
    FIELD_TO_TEST = Dict

    def test_json_equals(self):
        # Dicts round-trip unchanged.
        for value in ({}, {'a': 'b', 'c': 3}):
            self.assertJSONOrSetEquals(value, value)

    def test_none(self):
        self.assertJSONOrSetEquals(None, None)

    def test_error(self):
        # Lists, strings, numbers and bools are all rejected.
        for bad in (['foo', 'bar'], [], 'abc', '1.23', 'true', 3.7, True):
            self.assertJSONOrSetTypeError(bad)
def test_field_name_defaults():
    """display_name falls back to the field's attribute name when not set."""
    # A free-standing field picks up whatever name is assigned to it.
    attempts_field = Integer()
    attempts_field.__name__ = "max_problem_attempts"
    assert_equals('max_problem_attempts', attempts_field.display_name)

    class TestBlock(XBlock):
        """
        Block for testing
        """
        field_x = List()

    # A field declared on a block defaults to its class-attribute name.
    assert_equals("field_x", TestBlock.field_x.display_name)
def test_scope_key():
    # Tests scope_key(): keys have the form "usage-or-block id/field name/user id",
    # where components irrelevant to the field's scope become "NONE.NONE" and
    # special characters are escaped (see the expected strings below).
    class TestBlock(XBlock):
        """
        Block for testing
        """
        field_x = List(scope=Scope.settings, name='')
        settings_lst = List(scope=Scope.settings, name='')
        uss_lst = List(scope=Scope.user_state_summary, name='')
        user_lst = List(scope=Scope.user_state, name='')
        pref_lst = List(scope=Scope.preferences, name='')
        user_info_lst = List(scope=Scope.user_info, name='')
    # Ids deliberately contain '_', '.' and '#' to exercise the escaping rules.
    sids = ScopeIds(user_id="_bob",
                    block_type="b.12#ob",
                    def_id="..",
                    usage_id="..")
    field_data = DictFieldData({})
    runtime = TestRuntime(Mock(), services={'field-data': field_data})
    block = TestBlock(runtime, None, sids)
    # Format: usage or block ID/field_name/user_id
    # NOTE(review): the expected strings encode the escaping scheme (e.g. '_'
    # expands and '#' becomes "_35_"); verify against scope_key's implementation
    # before changing any of them.
    for item, correct_key in [[TestBlock.field_x, "__..../field__x/NONE.NONE"],
                              [TestBlock.user_info_lst, "NONE.NONE/user__info__lst/____bob"],
                              [TestBlock.pref_lst, "b..12_35_ob/pref__lst/____bob"],
                              [TestBlock.user_lst, "__..../user__lst/____bob"],
                              [TestBlock.uss_lst, "__..../uss__lst/NONE.NONE"],
                              [TestBlock.settings_lst, "__..../settings__lst/NONE.NONE"]]:
        key = scope_key(item, block)
        assert_equals(key, correct_key)
def test_field_display_name():
    """An explicitly supplied display_name always wins over defaults."""
    attempts = Integer(display_name='Maximum Problem Attempts')
    attempts._name = "max_problem_attempts"

    boolean_field = Boolean(display_name="boolean field")

    class TestBlock(XBlock):
        """
        Block for testing
        """
        field_x = List(display_name="Field Known as X")

    expectations = (
        ("Maximum Problem Attempts", attempts),
        ("boolean field", boolean_field),
        ("Field Known as X", TestBlock.field_x),
    )
    for expected_name, field in expectations:
        assert_equals(expected_name, field.display_name)
def test_unique_id_default():
    """UNIQUE_ID defaults are stable for the same ids, and vary with them."""
    class TestBlock(XBlock):
        """
        Block for testing
        """
        field_a = String(default=UNIQUE_ID, scope=Scope.settings)
        field_b = String(default=UNIQUE_ID, scope=Scope.user_state)

    sids = ScopeIds(user_id="bob",
                    block_type="bobs-type",
                    def_id="definition-id",
                    usage_id="usage-id")

    def make_block(scope_ids):
        """Build a fresh TestBlock over empty field data for the given ids."""
        runtime = TestRuntime(services={'field-data': DictFieldData({})})
        return TestBlock(runtime, DictFieldData({}), scope_ids)

    first_block = make_block(sids)
    unique_a = first_block.field_a
    unique_b = first_block.field_b

    # Same ids: unique ID defaults must be reproducible.
    second_block = make_block(sids)
    assert_equals(unique_a, second_block.field_a)
    assert_equals(unique_b, second_block.field_b)

    # Different user: only the user_state-scoped default changes;
    # the settings-scoped default stays put.
    other_user_block = make_block(sids._replace(user_id='alice'))
    assert_equals(unique_a, other_user_block.field_a)
    assert_not_equals(unique_b, other_user_block.field_b)

    # Different usage: both defaults change.
    other_usage_block = make_block(sids._replace(usage_id='usage-2'))
    assert_not_equals(unique_a, other_usage_block.field_a)
    assert_not_equals(unique_b, other_usage_block.field_b)
def test_values():
    """The `values` property handles static lists, callables and the default."""
    # A static list is returned as-is.
    static_values = ['foo', 'bar']
    assert_equals(static_values, String(values=static_values).values)
    # A callable is invoked to generate the values.
    assert_equals([1, 4], String(values=lambda: [1, 4]).values)
    # With nothing specified, there are no values.
    assert_equals(None, String().values)
def test_values_boolean():
    """Boolean ships with a built-in True/False pair of display values."""
    expected = (
        {'display_name': "True", "value": True},
        {'display_name': "False", "value": False},
    )
    assert_equals(expected, Boolean().values)
def test_values_dict():
    """A min/max dict is an accepted `values` specification for Integer."""
    bounds = {"min": 1, "max": 100}
    assert_equals(bounds, Integer(values=bounds).values)
def test_set_incomparable_fields():
    """
    Setting a field to a value that cannot be compared to the current one
    (e.g. timezone-aware vs. naive datetimes) must simply overwrite the value
    instead of raising from the comparison.
    """
    class FieldTester(XBlock):
        """Test block for this test."""
        incomparable = Field(scope=Scope.settings)

    runtime = TestRuntime(services={'field-data': DictFieldData({})})
    field_tester = FieldTester(runtime, scope_ids=Mock(spec=ScopeIds))

    naive = dt.datetime(2015, 1, 1)
    aware = dt.datetime(2015, 1, 1, tzinfo=pytz.UTC)
    field_tester.incomparable = naive
    field_tester.incomparable = aware
    assert_equals(field_tester.incomparable, aware)
def test_twofaced_field_access():
    """A field whose to_json output differs from its from_json input must
    still persist and save correctly."""
    class TwoFacedField(Field):
        """A field that emits different 'json' than it parses."""
        def from_json(self, thestr):
            """Store an int, the length of the string parsed."""
            return len(thestr)

        def to_json(self, value):
            """Emit some number of X's."""
            return "X" * value

    class FieldTester(XBlock):
        """Test block for TwoFacedField."""
        how_many = TwoFacedField(scope=Scope.settings)

    original_json = "YYY"
    runtime = TestRuntime(services={'field-data': DictFieldData({'how_many': original_json})})
    field_tester = FieldTester(runtime, scope_ids=Mock(spec=ScopeIds))

    # Neither the parsed native value nor its re-serialized form matches the
    # raw json we started from.
    assert_not_equals(field_tester.how_many, original_json)
    assert_not_equals(TwoFacedField().to_json(field_tester.how_many), original_json)

    # Reading the field marks it dirty (via __get__) ...
    assert_equals(len(field_tester._dirty_fields), 1)
    # ... but it must not actually be queued for saving.
    assert_not_in('how_many', field_tester._get_fields_to_save())  # pylint: disable=W0212
def test_setting_the_same_value_marks_field_as_dirty():
    """
    Check that setting field to the same value marks mutable fields as dirty.
    However, since the value hasn't changed, these fields won't be saved.
    """
    class FieldTester(XBlock):
        """Test block for set - get test."""
        non_mutable = String(scope=Scope.settings)
        list_field = List(scope=Scope.settings)
        dict_field = Dict(scope=Scope.settings)

    runtime = TestRuntime(services={'field-data': DictFieldData({})})
    field_tester = FieldTester(runtime, scope_ids=Mock(spec=ScopeIds))

    # Precondition: nothing is dirty and nothing is set.
    assert_equals(len(field_tester._dirty_fields), 0)
    for name in ('list_field', 'dict_field', 'non_mutable'):
        assert_false(field_tester.fields[name].is_set_on(field_tester))

    # Assign every field its own current value.
    field_tester.non_mutable = field_tester.non_mutable
    field_tester.list_field = field_tester.list_field
    field_tester.dict_field = field_tester.dict_field

    # Only the mutable fields become dirty ...
    assert_not_in(field_tester.fields['non_mutable'], field_tester._dirty_fields)
    assert_in(field_tester.fields['list_field'], field_tester._dirty_fields)
    assert_in(field_tester.fields['dict_field'], field_tester._dirty_fields)
    # ... and none of them counts as explicitly set.
    for name in ('non_mutable', 'list_field', 'dict_field'):
        assert_false(field_tester.fields[name].is_set_on(field_tester))
class SentinelTest(unittest.TestCase):
    """
    Tests of :ref:`xblock.fields.Sentinel`.
    """
    def test_equality(self):
        # Sentinels compare equal iff their names match, never to plain strings.
        base = Sentinel('base')
        self.assertEquals(base, base)
        self.assertEquals(Sentinel('base'), base)
        self.assertNotEquals(Sentinel('foo'), base)
        self.assertNotEquals('base', base)

    def test_hashing(self):
        # Equal sentinels hash equal, so they are interchangeable as dict keys.
        base = Sentinel('base')
        a_dict = {base: True}
        self.assertEquals(True, a_dict[Sentinel('base')])
        self.assertEquals(True, a_dict[base])
        self.assertNotIn(Sentinel('foo'), a_dict)
        self.assertNotIn('base', a_dict)
@ddt.ddt
class FieldSerializationTest(unittest.TestCase):
    """
    Tests field.from_string and field.to_string methods.
    """
    def assert_to_string(self, _type, value, string):
        """
        Helper method: checks if _type's to_string given instance of _type returns expected string
        """
        result = _type(enforce_type=True).to_string(value)
        self.assertEquals(result, string)
    def assert_from_string(self, _type, string, value):
        """
        Helper method: checks if _type's from_string given string representation of type returns expected value
        """
        result = _type(enforce_type=True).from_string(string)
        self.assertEquals(result, value)
    # (field type, native value, canonical string form) triples that must
    # round-trip identically in both directions.
    @ddt.unpack
    @ddt.data(
        (Integer, 0, '0'),
        (Integer, 5, '5'),
        (Integer, -1023, '-1023'),
        (Integer, 12345678, "12345678"),
        (Float, 5.321, '5.321'),
        (Float, -1023.35, '-1023.35'),
        (Float, 1e+100, '1e+100'),
        (Float, float('inf'), 'Infinity'),
        (Float, float('-inf'), '-Infinity'),
        (Boolean, True, "true"),
        (Boolean, False, "false"),
        (Integer, True, 'true'),
        (String, "", ""),
        (String, "foo", 'foo'),
        (String, "bar", 'bar'),
        (Dict, {}, '{}'),
        (List, [], '[]'),
        (Dict, {"foo": 1, "bar": 2}, textwrap.dedent("""\
        {
          "bar": 2,
          "foo": 1
        }""")),
        (List, [1, 2, 3], textwrap.dedent("""\
        [
          1,
          2,
          3
        ]""")),
        (Dict, {"foo": [1, 2, 3], "bar": 2}, textwrap.dedent("""\
        {
          "bar": 2,
          "foo": [
            1,
            2,
            3
          ]
        }""")))
    def test_both_directions(self, _type, value, string):
        """Easy cases that work in both directions."""
        self.assert_to_string(_type, value, string)
        self.assert_from_string(_type, string, value)
    # Float formatting is platform/version dependent, so these cases only
    # require the output to match a regexp rather than an exact string.
    @ddt.unpack
    @ddt.data(
        (Float, 0.0, r"0|0\.0*"),
        (Float, 1.0, r"1|1\.0*"),
        (Float, -10.0, r"-10|-10\.0*"))
    def test_to_string_regexp_matches(self, _type, value, regexp):
        # NOTE(review): assertRegexpMatches is the Python 2-era name of assertRegex.
        result = _type(enforce_type=True).to_string(value)
        self.assertRegexpMatches(result, regexp)
    # One-way parsing cases: alternate int bases, case-insensitive booleans,
    # and YAML-style dict/list inputs that don't serialize back identically.
    @ddt.unpack
    @ddt.data(
        (Integer, "0xff", 0xff),
        (Integer, "0b01", 1),
        (Integer, "0b10", 2),
        (Float, '0', 0.0),
        (Float, '0.0', 0.0),
        (Float, '-10', -10.0),
        (Float, '-10.0', -10.0),
        (Boolean, 'TRUE', True),
        (Boolean, 'FALSE', False),
        (
            Dict,
            textwrap.dedent("""\
                foo: 1
                bar: 2.124
                baz: True
                kuu: some string
            """),
            {"foo": 1, "bar": 2.124, "baz": True, "kuu": "some string"}
        ),
        (
            List,
            textwrap.dedent("""\
                - 1
                - 2.345
                - true
                - false
                - null
                - some string
            """),
            [1, 2.345, True, False, None, "some string"]
        ),
        (
            Dict,
            textwrap.dedent("""\
                foo: 1
                bar: [1, 2, 3]
            """),
            {"foo": 1, "bar": [1, 2, 3]}
        ),
        (
            Dict,
            textwrap.dedent("""\
                foo: 1
                bar:
                - 1
                - 2
                - meow: true
                  woof: false
                  kaw: null
            """),
            {"foo": 1, "bar": [1, 2, {"meow": True, "woof": False, "kaw": None}]}
        ),
        (
            List,
            textwrap.dedent("""\
                - 1
                - 2.345
                - {"foo": true, "bar": [1,2,3]}
                - meow: false
                  woof: true
                  kaw: null
            """),
            [1, 2.345, {"foo": True, "bar": [1, 2, 3]}, {"meow": False, "woof": True, "kaw": None}]
        )
    )
    def test_from_string(self, _type, string, value):
        self.assert_from_string(_type, string, value)
    def test_float_from_NaN_is_nan(self): # pylint: disable=invalid-name
        """Test parsing of NaN.
        This special test case is necessary since
        float('nan') compares inequal to everything.
        """
        result = Float(enforce_type=True).from_string('NaN')
        self.assertTrue(math.isnan(result))
    @ddt.unpack
    @ddt.data(*itertools.product(
        [Integer, Float],
        ['{"foo":"bar"}', '[1, 2, 3]', 'baz', '1.abc', 'defg']))
    def test_from_string_errors(self, _type, string):
        """ Cases that raises various exceptions."""
        # NOTE(review): StandardError exists only on Python 2; a Python 3 port
        # of this file would need Exception (or a narrower type) here.
        with self.assertRaises(StandardError):
            _type(enforce_type=True).from_string(string)
| GbalsaC/bitnamiP | XBlock/xblock/test/test_fields.py | Python | agpl-3.0 | 26,030 | 0.000845 |
# -*- coding: utf-8 -*-
# (c) 2015 Antiun Ingeniería S.L. - Pedro M. Baeza
# (c) 2015 AvanzOSC - Ainara Galdona
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0
# Odoo/OpenERP addon manifest: Spanish AEAT VAT prorrate support for the
# modelo 303 return. This file is evaluated as a plain dict by the framework.
{
    "name": "AEAT - Prorrata de IVA",
    "version": "8.0.2.0.0",
    "license": "AGPL-3",
    "author": "AvanzOSC, "
              "Antiun Ingeniería S.L., "
              "Serv. Tecnol. Avanzados - Pedro M. Baeza, "
              "Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/l10n-spain",
    "category": "Accounting",
    # Extends the base AEAT modelo 303 (VAT return) module.
    "depends": [
        'l10n_es_aeat_mod303',
    ],
    # Data/view XML files loaded on install, in this order.
    "data": [
        "data/tax_code_map_mod303_data.xml",
        "data/aeat_export_mod303_data.xml",
        'wizard/l10n_es_aeat_compute_vat_prorrate_view.xml',
        'views/mod303_view.xml'
    ],
    "installable": True,
}
| Endika/l10n-spain | l10n_es_aeat_vat_prorrate/__openerp__.py | Python | agpl-3.0 | 812 | 0 |
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from StringIO import StringIO
import messages
_messages_file_contents = """# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "config.h"
#if ENABLE(WEBKIT2)
messages -> WebPage {
LoadURL(WTF::String url)
#if ENABLE(TOUCH_EVENTS)
TouchEvent(WebKit::WebTouchEvent event)
#endif
DidReceivePolicyDecision(uint64_t frameID, uint64_t listenerID, uint32_t policyAction)
Close()
PreferencesDidChange(WebKit::WebPreferencesStore store)
SendDoubleAndFloat(double d, float f)
SendInts(Vector<uint64_t> ints, Vector<Vector<uint64_t> > intVectors)
CreatePlugin(uint64_t pluginInstanceID, WebKit::Plugin::Parameters parameters) -> (bool result)
RunJavaScriptAlert(uint64_t frameID, WTF::String message) -> ()
GetPlugins(bool refresh) -> (Vector<WebCore::PluginInfo> plugins) DispatchOnConnectionQueue
GetPluginProcessConnection(WTF::String pluginPath) -> (CoreIPC::Connection::Handle connectionHandle) Delayed
TestMultipleAttributes() -> () DispatchOnConnectionQueue Delayed
#if PLATFORM(MAC)
DidCreateWebProcessConnection(CoreIPC::MachPort connectionIdentifier)
#endif
#if PLATFORM(MAC)
# Keyboard support
InterpretKeyEvent(uint32_t type) -> (Vector<WebCore::KeypressCommand> commandName)
#endif
}
#endif
"""
# Expected parse of _messages_file_contents. Schema: top-level 'name' and
# 'condition' describe the receiver; each entry in 'messages' has a 'name',
# a tuple of (type, name) 'parameters', an optional 'reply_parameters' tuple
# (present only for sync messages), and a per-message 'condition' (a
# preprocessor guard string, or None).
_expected_results = {
    'name': 'WebPage',
    'condition': 'ENABLE(WEBKIT2)',
    'messages': (
        {
            'name': 'LoadURL',
            'parameters': (
                ('WTF::String', 'url'),
            ),
            'condition': None,
        },
        {
            'name': 'TouchEvent',
            'parameters': (
                ('WebKit::WebTouchEvent', 'event'),
            ),
            'condition': 'ENABLE(TOUCH_EVENTS)',
        },
        {
            'name': 'DidReceivePolicyDecision',
            'parameters': (
                ('uint64_t', 'frameID'),
                ('uint64_t', 'listenerID'),
                ('uint32_t', 'policyAction'),
            ),
            'condition': None,
        },
        {
            'name': 'Close',
            'parameters': (),
            'condition': None,
        },
        {
            'name': 'PreferencesDidChange',
            'parameters': (
                ('WebKit::WebPreferencesStore', 'store'),
            ),
            'condition': None,
        },
        {
            'name': 'SendDoubleAndFloat',
            'parameters': (
                ('double', 'd'),
                ('float', 'f'),
            ),
            'condition': None,
        },
        {
            'name': 'SendInts',
            'parameters': (
                ('Vector<uint64_t>', 'ints'),
                ('Vector<Vector<uint64_t> >', 'intVectors')
            ),
            'condition': None,
        },
        {
            'name': 'CreatePlugin',
            'parameters': (
                ('uint64_t', 'pluginInstanceID'),
                ('WebKit::Plugin::Parameters', 'parameters')
            ),
            'reply_parameters': (
                ('bool', 'result'),
            ),
            'condition': None,
        },
        {
            'name': 'RunJavaScriptAlert',
            'parameters': (
                ('uint64_t', 'frameID'),
                ('WTF::String', 'message')
            ),
            'reply_parameters': (),
            'condition': None,
        },
        {
            'name': 'GetPlugins',
            'parameters': (
                ('bool', 'refresh'),
            ),
            'reply_parameters': (
                ('Vector<WebCore::PluginInfo>', 'plugins'),
            ),
            'condition': None,
        },
        {
            'name': 'GetPluginProcessConnection',
            'parameters': (
                ('WTF::String', 'pluginPath'),
            ),
            'reply_parameters': (
                ('CoreIPC::Connection::Handle', 'connectionHandle'),
            ),
            'condition': None,
        },
        {
            'name': 'TestMultipleAttributes',
            'parameters': (
            ),
            'reply_parameters': (
            ),
            'condition': None,
        },
        {
            'name': 'DidCreateWebProcessConnection',
            'parameters': (
                ('CoreIPC::MachPort', 'connectionIdentifier'),
            ),
            'condition': 'PLATFORM(MAC)',
        },
        {
            'name': 'InterpretKeyEvent',
            'parameters': (
                ('uint32_t', 'type'),
            ),
            'reply_parameters': (
                ('Vector<WebCore::KeypressCommand>', 'commandName'),
            ),
            'condition': 'PLATFORM(MAC)',
        },
    ),
}
class MessagesTest(unittest.TestCase):
    """Base fixture: parses the shared messages.in sample into self.receiver."""
    def setUp(self):
        # Each test gets a freshly parsed MessageReceiver.
        self.receiver = messages.MessageReceiver.parse(StringIO(_messages_file_contents))
class ParsingTest(MessagesTest):
    """Checks that MessageReceiver.parse extracts the receiver name/condition
    and each message's name, parameters, reply parameters and condition."""

    def check_message(self, message, expected_message):
        """Compare one parsed message against its expected description dict."""
        self.assertEquals(message.name, expected_message['name'])
        self.assertEquals(len(message.parameters), len(expected_message['parameters']))
        for index, parameter in enumerate(message.parameters):
            self.assertEquals(parameter.type, expected_message['parameters'][index][0])
            self.assertEquals(parameter.name, expected_message['parameters'][index][1])
        if message.reply_parameters is not None:
            # Assert the count explicitly: an empty reply tuple would otherwise
            # skip the loop below and silently pass even when the expectation
            # lists reply parameters (and a longer parsed list would go unchecked).
            self.assertEquals(len(message.reply_parameters), len(expected_message['reply_parameters']))
            for index, parameter in enumerate(message.reply_parameters):
                self.assertEquals(parameter.type, expected_message['reply_parameters'][index][0])
                self.assertEquals(parameter.name, expected_message['reply_parameters'][index][1])
        else:
            # Async messages must not have reply expectations.
            self.assertFalse('reply_parameters' in expected_message)

    def test_receiver(self):
        """Receiver should be parsed as expected"""
        self.assertEquals(self.receiver.name, _expected_results['name'])
        self.assertEquals(self.receiver.condition, _expected_results['condition'])
        self.assertEquals(len(self.receiver.messages), len(_expected_results['messages']))
        for index, message in enumerate(self.receiver.messages):
            self.check_message(message, _expected_results['messages'][index])
_expected_header = """/*
* Copyright (C) 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef WebPageMessages_h
#define WebPageMessages_h
#if ENABLE(WEBKIT2)
#include "Arguments.h"
#include "Connection.h"
#include "MessageID.h"
#include "Plugin.h"
#include <WebCore/KeyboardEvent.h>
#include <WebCore/PluginData.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Vector.h>
namespace CoreIPC {
class ArgumentEncoder;
class Connection;
class MachPort;
}
namespace WTF {
class String;
}
namespace WebKit {
struct WebPreferencesStore;
class WebTouchEvent;
}
namespace Messages {
namespace WebPage {
enum Kind {
LoadURLID,
#if ENABLE(TOUCH_EVENTS)
TouchEventID,
#endif
DidReceivePolicyDecisionID,
CloseID,
PreferencesDidChangeID,
SendDoubleAndFloatID,
SendIntsID,
CreatePluginID,
RunJavaScriptAlertID,
GetPluginsID,
GetPluginProcessConnectionID,
TestMultipleAttributesID,
#if PLATFORM(MAC)
DidCreateWebProcessConnectionID,
#endif
#if PLATFORM(MAC)
InterpretKeyEventID,
#endif
};
struct LoadURL : CoreIPC::Arguments1<const WTF::String&> {
static const Kind messageID = LoadURLID;
typedef CoreIPC::Arguments1<const WTF::String&> DecodeType;
explicit LoadURL(const WTF::String& url)
: CoreIPC::Arguments1<const WTF::String&>(url)
{
}
};
#if ENABLE(TOUCH_EVENTS)
struct TouchEvent : CoreIPC::Arguments1<const WebKit::WebTouchEvent&> {
static const Kind messageID = TouchEventID;
typedef CoreIPC::Arguments1<const WebKit::WebTouchEvent&> DecodeType;
explicit TouchEvent(const WebKit::WebTouchEvent& event)
: CoreIPC::Arguments1<const WebKit::WebTouchEvent&>(event)
{
}
};
#endif
struct DidReceivePolicyDecision : CoreIPC::Arguments3<uint64_t, uint64_t, uint32_t> {
static const Kind messageID = DidReceivePolicyDecisionID;
typedef CoreIPC::Arguments3<uint64_t, uint64_t, uint32_t> DecodeType;
DidReceivePolicyDecision(uint64_t frameID, uint64_t listenerID, uint32_t policyAction)
: CoreIPC::Arguments3<uint64_t, uint64_t, uint32_t>(frameID, listenerID, policyAction)
{
}
};
struct Close : CoreIPC::Arguments0 {
static const Kind messageID = CloseID;
typedef CoreIPC::Arguments0 DecodeType;
};
struct PreferencesDidChange : CoreIPC::Arguments1<const WebKit::WebPreferencesStore&> {
static const Kind messageID = PreferencesDidChangeID;
typedef CoreIPC::Arguments1<const WebKit::WebPreferencesStore&> DecodeType;
explicit PreferencesDidChange(const WebKit::WebPreferencesStore& store)
: CoreIPC::Arguments1<const WebKit::WebPreferencesStore&>(store)
{
}
};
struct SendDoubleAndFloat : CoreIPC::Arguments2<double, float> {
static const Kind messageID = SendDoubleAndFloatID;
typedef CoreIPC::Arguments2<double, float> DecodeType;
SendDoubleAndFloat(double d, float f)
: CoreIPC::Arguments2<double, float>(d, f)
{
}
};
struct SendInts : CoreIPC::Arguments2<const Vector<uint64_t>&, const Vector<Vector<uint64_t> >&> {
static const Kind messageID = SendIntsID;
typedef CoreIPC::Arguments2<const Vector<uint64_t>&, const Vector<Vector<uint64_t> >&> DecodeType;
SendInts(const Vector<uint64_t>& ints, const Vector<Vector<uint64_t> >& intVectors)
: CoreIPC::Arguments2<const Vector<uint64_t>&, const Vector<Vector<uint64_t> >&>(ints, intVectors)
{
}
};
struct CreatePlugin : CoreIPC::Arguments2<uint64_t, const WebKit::Plugin::Parameters&> {
static const Kind messageID = CreatePluginID;
typedef CoreIPC::Arguments1<bool&> Reply;
typedef CoreIPC::Arguments2<uint64_t, const WebKit::Plugin::Parameters&> DecodeType;
CreatePlugin(uint64_t pluginInstanceID, const WebKit::Plugin::Parameters& parameters)
: CoreIPC::Arguments2<uint64_t, const WebKit::Plugin::Parameters&>(pluginInstanceID, parameters)
{
}
};
struct RunJavaScriptAlert : CoreIPC::Arguments2<uint64_t, const WTF::String&> {
static const Kind messageID = RunJavaScriptAlertID;
typedef CoreIPC::Arguments0 Reply;
typedef CoreIPC::Arguments2<uint64_t, const WTF::String&> DecodeType;
RunJavaScriptAlert(uint64_t frameID, const WTF::String& message)
: CoreIPC::Arguments2<uint64_t, const WTF::String&>(frameID, message)
{
}
};
struct GetPlugins : CoreIPC::Arguments1<bool> {
static const Kind messageID = GetPluginsID;
typedef CoreIPC::Arguments1<Vector<WebCore::PluginInfo>&> Reply;
typedef CoreIPC::Arguments1<bool> DecodeType;
explicit GetPlugins(bool refresh)
: CoreIPC::Arguments1<bool>(refresh)
{
}
};
struct GetPluginProcessConnection : CoreIPC::Arguments1<const WTF::String&> {
static const Kind messageID = GetPluginProcessConnectionID;
struct DelayedReply : public ThreadSafeRefCounted<DelayedReply> {
DelayedReply(PassRefPtr<CoreIPC::Connection>, PassOwnPtr<CoreIPC::ArgumentEncoder>);
~DelayedReply();
bool send(const CoreIPC::Connection::Handle& connectionHandle);
private:
RefPtr<CoreIPC::Connection> m_connection;
OwnPtr<CoreIPC::ArgumentEncoder> m_arguments;
};
typedef CoreIPC::Arguments1<CoreIPC::Connection::Handle&> Reply;
typedef CoreIPC::Arguments1<const WTF::String&> DecodeType;
explicit GetPluginProcessConnection(const WTF::String& pluginPath)
: CoreIPC::Arguments1<const WTF::String&>(pluginPath)
{
}
};
struct TestMultipleAttributes : CoreIPC::Arguments0 {
static const Kind messageID = TestMultipleAttributesID;
struct DelayedReply : public ThreadSafeRefCounted<DelayedReply> {
DelayedReply(PassRefPtr<CoreIPC::Connection>, PassOwnPtr<CoreIPC::ArgumentEncoder>);
~DelayedReply();
bool send();
private:
RefPtr<CoreIPC::Connection> m_connection;
OwnPtr<CoreIPC::ArgumentEncoder> m_arguments;
};
typedef CoreIPC::Arguments0 Reply;
typedef CoreIPC::Arguments0 DecodeType;
};
#if PLATFORM(MAC)
struct DidCreateWebProcessConnection : CoreIPC::Arguments1<const CoreIPC::MachPort&> {
static const Kind messageID = DidCreateWebProcessConnectionID;
typedef CoreIPC::Arguments1<const CoreIPC::MachPort&> DecodeType;
explicit DidCreateWebProcessConnection(const CoreIPC::MachPort& connectionIdentifier)
: CoreIPC::Arguments1<const CoreIPC::MachPort&>(connectionIdentifier)
{
}
};
#endif
#if PLATFORM(MAC)
struct InterpretKeyEvent : CoreIPC::Arguments1<uint32_t> {
static const Kind messageID = InterpretKeyEventID;
typedef CoreIPC::Arguments1<Vector<WebCore::KeypressCommand>&> Reply;
typedef CoreIPC::Arguments1<uint32_t> DecodeType;
explicit InterpretKeyEvent(uint32_t type)
: CoreIPC::Arguments1<uint32_t>(type)
{
}
};
#endif
} // namespace WebPage
} // namespace Messages
namespace CoreIPC {
template<> struct MessageKindTraits<Messages::WebPage::Kind> {
static const MessageClass messageClass = MessageClassWebPage;
};
} // namespace CoreIPC
#endif // ENABLE(WEBKIT2)
#endif // WebPageMessages_h
"""
_expected_receiver_implementation = """/*
* Copyright (C) 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#if ENABLE(WEBKIT2)
#include "WebPage.h"
#if PLATFORM(MAC)
#include "ArgumentCoders.h"
#endif
#include "ArgumentDecoder.h"
#include "Connection.h"
#include "HandleMessage.h"
#if PLATFORM(MAC)
#include "MachPort.h"
#endif
#include "Plugin.h"
#if PLATFORM(MAC)
#include "WebCoreArgumentCoders.h"
#endif
#if ENABLE(TOUCH_EVENTS)
#include "WebEvent.h"
#endif
#include "WebPageMessages.h"
#include "WebPreferencesStore.h"
namespace Messages {
namespace WebPage {
GetPluginProcessConnection::DelayedReply::DelayedReply(PassRefPtr<CoreIPC::Connection> connection, PassOwnPtr<CoreIPC::ArgumentEncoder> arguments)
: m_connection(connection)
, m_arguments(arguments)
{
}
GetPluginProcessConnection::DelayedReply::~DelayedReply()
{
ASSERT(!m_connection);
}
bool GetPluginProcessConnection::DelayedReply::send(const CoreIPC::Connection::Handle& connectionHandle)
{
ASSERT(m_arguments);
m_arguments->encode(connectionHandle);
bool result = m_connection->sendSyncReply(m_arguments.release());
m_connection = nullptr;
return result;
}
TestMultipleAttributes::DelayedReply::DelayedReply(PassRefPtr<CoreIPC::Connection> connection, PassOwnPtr<CoreIPC::ArgumentEncoder> arguments)
: m_connection(connection)
, m_arguments(arguments)
{
}
TestMultipleAttributes::DelayedReply::~DelayedReply()
{
ASSERT(!m_connection);
}
bool TestMultipleAttributes::DelayedReply::send()
{
ASSERT(m_arguments);
bool result = m_connection->sendSyncReply(m_arguments.release());
m_connection = nullptr;
return result;
}
} // namespace WebPage
} // namespace Messages
namespace WebKit {
void WebPage::didReceiveWebPageMessage(CoreIPC::Connection*, CoreIPC::MessageID messageID, CoreIPC::ArgumentDecoder* arguments)
{
switch (messageID.get<Messages::WebPage::Kind>()) {
case Messages::WebPage::LoadURLID:
CoreIPC::handleMessage<Messages::WebPage::LoadURL>(arguments, this, &WebPage::loadURL);
return;
#if ENABLE(TOUCH_EVENTS)
case Messages::WebPage::TouchEventID:
CoreIPC::handleMessage<Messages::WebPage::TouchEvent>(arguments, this, &WebPage::touchEvent);
return;
#endif
case Messages::WebPage::DidReceivePolicyDecisionID:
CoreIPC::handleMessage<Messages::WebPage::DidReceivePolicyDecision>(arguments, this, &WebPage::didReceivePolicyDecision);
return;
case Messages::WebPage::CloseID:
CoreIPC::handleMessage<Messages::WebPage::Close>(arguments, this, &WebPage::close);
return;
case Messages::WebPage::PreferencesDidChangeID:
CoreIPC::handleMessage<Messages::WebPage::PreferencesDidChange>(arguments, this, &WebPage::preferencesDidChange);
return;
case Messages::WebPage::SendDoubleAndFloatID:
CoreIPC::handleMessage<Messages::WebPage::SendDoubleAndFloat>(arguments, this, &WebPage::sendDoubleAndFloat);
return;
case Messages::WebPage::SendIntsID:
CoreIPC::handleMessage<Messages::WebPage::SendInts>(arguments, this, &WebPage::sendInts);
return;
#if PLATFORM(MAC)
case Messages::WebPage::DidCreateWebProcessConnectionID:
CoreIPC::handleMessage<Messages::WebPage::DidCreateWebProcessConnection>(arguments, this, &WebPage::didCreateWebProcessConnection);
return;
#endif
default:
break;
}
ASSERT_NOT_REACHED();
}
CoreIPC::SyncReplyMode WebPage::didReceiveSyncWebPageMessage(CoreIPC::Connection* connection, CoreIPC::MessageID messageID, CoreIPC::ArgumentDecoder* arguments, CoreIPC::ArgumentEncoder* reply)
{
switch (messageID.get<Messages::WebPage::Kind>()) {
case Messages::WebPage::CreatePluginID:
CoreIPC::handleMessage<Messages::WebPage::CreatePlugin>(arguments, reply, this, &WebPage::createPlugin);
return CoreIPC::AutomaticReply;
case Messages::WebPage::RunJavaScriptAlertID:
CoreIPC::handleMessage<Messages::WebPage::RunJavaScriptAlert>(arguments, reply, this, &WebPage::runJavaScriptAlert);
return CoreIPC::AutomaticReply;
case Messages::WebPage::GetPluginsID:
CoreIPC::handleMessage<Messages::WebPage::GetPlugins>(arguments, reply, this, &WebPage::getPlugins);
return CoreIPC::AutomaticReply;
case Messages::WebPage::GetPluginProcessConnectionID:
CoreIPC::handleMessageDelayed<Messages::WebPage::GetPluginProcessConnection>(connection, arguments, reply, this, &WebPage::getPluginProcessConnection);
return CoreIPC::ManualReply;
case Messages::WebPage::TestMultipleAttributesID:
CoreIPC::handleMessageDelayed<Messages::WebPage::TestMultipleAttributes>(connection, arguments, reply, this, &WebPage::testMultipleAttributes);
return CoreIPC::ManualReply;
#if PLATFORM(MAC)
case Messages::WebPage::InterpretKeyEventID:
CoreIPC::handleMessage<Messages::WebPage::InterpretKeyEvent>(arguments, reply, this, &WebPage::interpretKeyEvent);
return CoreIPC::AutomaticReply;
#endif
default:
break;
}
ASSERT_NOT_REACHED();
return CoreIPC::AutomaticReply;
}
} // namespace WebKit
#endif // ENABLE(WEBKIT2)
"""
class GeneratedFileContentsTest(unittest.TestCase):
    """Base class providing a line-by-line comparison of generated files.

    Comparing line by line (instead of the whole string at once) makes a
    mismatch report point at the first differing line, which is far easier
    to read for large generated files.
    """

    def assertGeneratedFileContentsEqual(self, first, second):
        """Assert that two multi-line strings are identical.

        Lines are compared pairwise so the failure message shows the first
        differing line.  Only the common prefix is iterated and the line
        counts are compared afterwards, so a missing or extra trailing line
        is reported as a clean assertion failure instead of an IndexError.
        """
        first_list = first.split('\n')
        second_list = second.split('\n')
        # zip() stops at the shorter list; the length check below catches
        # any missing/extra lines without indexing out of range.
        for first_line, second_line in zip(first_list, second_list):
            self.assertEqual(first_line, second_line)
        self.assertEqual(len(first_list), len(second_list))
class HeaderTest(GeneratedFileContentsTest):
    """Checks the generated messages header against the expected text."""

    def test_header(self):
        """generate_messages_header() output must match _expected_header."""
        source = StringIO(_messages_file_contents)
        generated = messages.generate_messages_header(source)
        self.assertGeneratedFileContentsEqual(generated, _expected_header)
class ReceiverImplementationTest(GeneratedFileContentsTest):
    """Checks the generated receiver implementation against the expected text."""

    def test_receiver_implementation(self):
        """generate_message_handler() output must match the expected text."""
        source = StringIO(_messages_file_contents)
        generated = messages.generate_message_handler(source)
        self.assertGeneratedFileContentsEqual(generated,
                                              _expected_receiver_implementation)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| mogoweb/webkit_for_android5.1 | webkit/Source/WebKit2/Scripts/webkit2/messages_unittest.py | Python | apache-2.0 | 24,676 | 0.002432 |
# -*- coding: utf-8 -*-
##
## test_account.py
## Login : <dax@happycoders.org>
## Started on Wed Feb 14 08:23:17 2007 David Rousselie
## $Id$
##
## Copyright (C) 2007 David Rousselie
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
import unittest
import thread
from jcl.tests import JCLTestCase
import jcl.model as model
from jcl.error import FieldError
from jcl.model.account import Account, PresenceAccount, User
import jmc.model.account
from jmc.model.account import MailAccount, POP3Account, IMAPAccount, \
GlobalSMTPAccount, AbstractSMTPAccount, SMTPAccount
from jmc.lang import Lang
from jcl.model.tests.account import Account_TestCase, \
PresenceAccount_TestCase, InheritableAccount_TestCase, \
ExampleAccount
from jmc.model.tests import email_generator, server
class AccountModule_TestCase(unittest.TestCase):
    """Tests for the module-level field validators of jmc.model.account."""

    def _assert_rejected(self, validator, value):
        """Assert that `validator` raises FieldError for the given value."""
        self.assertRaises(FieldError, validator, value, None, None)

    def test_validate_login_with_empty_login(self):
        self._assert_rejected(jmc.model.account.validate_login, None)

    def test_validate_login_with_login_with_whitespace(self):
        self._assert_rejected(jmc.model.account.validate_login,
                              "login with spaces")

    def test_validate_host_with_empty_login(self):
        self._assert_rejected(jmc.model.account.validate_host, None)

    def test_validate_host_with_host_with_whitespace(self):
        self._assert_rejected(jmc.model.account.validate_host,
                              "host with spaces")
class MailAccount_TestCase(PresenceAccount_TestCase):
    """Tests for MailAccount MIME decoding and message formatting."""

    def setUp(self):
        # Register the MailAccount table on top of the PresenceAccount setup.
        PresenceAccount_TestCase.setUp(self, tables=[MailAccount])
        self.account = MailAccount(user=User(jid="user1@test.com"),
                                   name="account1",
                                   jid="account1@jmc.test.com")
        self.account_class = MailAccount

    def make_test(email_type, tested_func, expected_res):
        # Class-body test factory: it runs at class-definition time, hence
        # no `self` parameter.  The returned `inner` function becomes the
        # actual test method.  `email_type` is an (encoded, multipart,
        # header) triple forwarded to the e-mail generator.
        def inner(self):
            encoded, multipart, header = email_type
            email = email_generator.generate(encoded,
                                             multipart,
                                             header)
            part = tested_func(self, email)
            self.assertEquals(part, expected_res)
        return inner

    # get_decoded_part() on single-part messages, plain and encoded.
    test_get_decoded_part_not_encoded = \
        make_test((False, False, False), \
                  lambda self, email: \
                      self.account.get_decoded_part(email, None),
                  u"Not encoded single part")

    test_get_decoded_part_encoded = \
        make_test((True, False, False),
                  lambda self, email: \
                      self.account.get_decoded_part(email, None),
                  u"Encoded single part with 'iso-8859-15' charset (éàê)")

    # format_message_summary(): From/Subject headers only.
    test_format_message_summary_not_encoded = \
        make_test((False, False, True),
                  lambda self, email: \
                      self.account.format_message_summary(email),
                  (u"From : not encoded from\nSubject : not encoded subject\n\n",
                   u"not encoded from"))

    test_format_message_summary_encoded = \
        make_test((True, False, True),
                  lambda self, email: \
                      self.account.format_message_summary(email),
                  (u"From : encoded from (éàê)\nSubject : encoded subject " + \
                       u"(éàê)\n\n",
                   u"encoded from (éàê)"))

    # Headers that mix an RFC 2047 encoded word with a plain ASCII part.
    test_format_message_summary_partial_encoded = \
        make_test((True, False, True),
                  lambda self, email: \
                      email.replace_header("Subject",
                                           "\" " + str(email["Subject"]) \
                                               + " \" not encoded part") or \
                      email.replace_header("From",
                                           "\" " + str(email["From"]) \
                                               + " \" not encoded part") or \
                      self.account.format_message_summary(email),
                  (u"From : \"encoded from (éàê)\" not encoded part\nSubject " + \
                       u": \"encoded subject (éàê)\" not encoded part\n\n",
                   u"\"encoded from (éàê)\" not encoded part"))

    # format_message(): headers plus decoded body, single and multipart.
    test_format_message_single_not_encoded = \
        make_test((False, False, True),
                  lambda self, email: \
                      self.account.format_message(email),
                  (u"From : not encoded from\nSubject : not encoded subject" + \
                       u"\n\nNot encoded single part\n",
                   u"not encoded from"))

    test_format_message_single_encoded = \
        make_test((True, False, True),
                  lambda self, email: \
                      self.account.format_message(email),
                  (u"From : encoded from (éàê)\nSubject : encoded subject " + \
                       u"(éàê)\n\nEncoded single part with 'iso-8859-15' charset" + \
                       u" (éàê)\n",
                   u"encoded from (éàê)"))

    test_format_message_multi_not_encoded = \
        make_test((False, True, True),
                  lambda self, email: \
                      self.account.format_message(email),
                  (u"From : not encoded from\nSubject : not encoded subject" + \
                       u"\n\nNot encoded multipart1\nNot encoded multipart2\n",
                   u"not encoded from"))

    test_format_message_multi_encoded = \
        make_test((True, True, True),
                  lambda self, email: \
                      self.account.format_message(email),
                  (u"From : encoded from (éàê)\nSubject : encoded subject (éà" + \
                       u"ê)\n\nutf-8 multipart1 with no charset (éàê)" + \
                       u"\nEncoded multipart2 with 'iso-8859-15' charset (éàê)\n" + \
                       u"Encoded multipart3 with no charset (éàê)\n",
                   u"encoded from (éàê)"))

    def test_get_default_status_msg(self):
        """
        Get default status message for MailAccount.
        Should raise NotImplementedError because get_type() method
        is not implemented
        """
        try:
            self.account.get_default_status_msg(Lang.en)
        except NotImplementedError:
            return
        # BUG FIX: was a bare `fail(...)` which would raise NameError
        # instead of reporting a test failure; siblings use self.fail().
        self.fail("No NotImplementedError raised")
class POP3Account_TestCase(InheritableAccount_TestCase):
    """Tests for POP3Account, driven against a scripted dummy POP3 server."""

    def setUp(self):
        JCLTestCase.setUp(self, tables=[Account, PresenceAccount, User,
                                        MailAccount, POP3Account])
        self.pop3_account = POP3Account(user=User(jid="user1@test.com"),
                                        name="account1",
                                        jid="account1@jmc.test.com",
                                        login="login")
        self.pop3_account.password = "pass"
        self.pop3_account.host = "localhost"
        self.pop3_account.port = 1110
        self.pop3_account.ssl = False
        model.db_disconnect()
        self.account_class = POP3Account

    def make_test(responses=None, queries=None, core=None):
        # Class-body test factory (runs at class-definition time, hence no
        # `self`).  The returned `inner` becomes the test method: it starts
        # a DummyServer scripted with the USER/PASS handshake plus the
        # given responses/queries, runs `core`, then verifies the exchange.
        def inner(self):
            self.server = server.DummyServer("localhost", 1110)
            thread.start_new_thread(self.server.serve, ())
            self.server.responses = ["+OK connected\r\n",
                                     "+OK name is a valid mailbox\r\n",
                                     "+OK pass\r\n"]
            if responses:
                self.server.responses += responses
            self.server.queries = ["USER login\r\n",
                                   "PASS pass\r\n"]
            if queries:
                self.server.queries += queries
            self.server.queries += ["QUIT\r\n"]
            self.pop3_account.connect()
            self.failUnless(self.pop3_account.connection,
                            "Cannot establish connection")
            if core:
                model.db_connect()
                core(self)
                model.db_disconnect()
            self.pop3_account.disconnect()
            self.failUnless(self.server.verify_queries(),
                            "Sended queries does not match expected queries.")
        return inner

    # BUG FIX: was `test_connection = make_test` (no parentheses), which
    # assigned the factory itself as the test method; running it called
    # make_test(self) and returned `inner` without ever executing it, so
    # the connection test silently tested nothing.
    test_connection = make_test()

    test_get_mail_list_summary = \
        make_test(["+OK 2 20\r\n",
                   "+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: mail subject 1\r\n.\r\n",
                   "+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: mail subject 2\r\n.\r\n",
                   "+OK\r\n"],
                  ["STAT\r\n",
                   "TOP 1 0\r\n",
                   "TOP 2 0\r\n",
                   "RSET\r\n"],
                  lambda self: \
                      self.assertEquals(self.pop3_account.get_mail_list_summary(),
                                        [("1", "mail subject 1"),
                                         ("2", "mail subject 2")]))

    test_get_mail_list_summary_start_index = \
        make_test(["+OK 3 30\r\n",
                   "+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: mail subject 2\r\n.\r\n",
                   "+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: mail subject 3\r\n.\r\n",
                   "+OK\r\n"],
                  ["STAT\r\n",
                   "TOP 2 0\r\n",
                   "TOP 3 0\r\n",
                   "RSET\r\n"],
                  lambda self: \
                      self.assertEquals(self.pop3_account.get_mail_list_summary(start_index=2),
                                        [("2", "mail subject 2"),
                                         ("3", "mail subject 3")]))

    test_get_mail_list_summary_end_index = \
        make_test(["+OK 3 30\r\n",
                   "+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: mail subject 1\r\n.\r\n",
                   "+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: mail subject 2\r\n.\r\n",
                   "+OK\r\n"],
                  ["STAT\r\n",
                   "TOP 1 0\r\n",
                   "TOP 2 0\r\n",
                   "RSET\r\n"],
                  lambda self: \
                      self.assertEquals(self.pop3_account.get_mail_list_summary(end_index=2),
                                        [("1", "mail subject 1"),
                                         ("2", "mail subject 2")]))

    test_get_new_mail_list = \
        make_test(["+OK 2 20\r\n"],
                  ["STAT\r\n"],
                  lambda self: \
                      self.assertEquals(self.pop3_account.get_new_mail_list(),
                                        ["1", "2"]))

    test_get_mail_summary = \
        make_test(["+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: subject test\r\n\r\n" + \
                       "mymessage\r\n.\r\n",
                   "+OK\r\n"],
                  ["RETR 1\r\n",
                   "RSET\r\n"],
                  lambda self: \
                      self.assertEquals(self.pop3_account.get_mail_summary(1),
                                        (u"From : user@test.com\n" + \
                                             u"Subject : subject test\n\n",
                                         u"user@test.com")))

    test_get_mail = \
        make_test(["+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: subject test\r\n\r\n" + \
                       "mymessage\r\n.\r\n",
                   "+OK\r\n"],
                  ["RETR 1\r\n",
                   "RSET\r\n"],
                  lambda self: \
                      self.assertEquals(self.pop3_account.get_mail(1),
                                        (u"From : user@test.com\n" + \
                                             u"Subject : subject test\n\n" + \
                                             u"mymessage\n",
                                         u"user@test.com")))

    # The account must still work when the server rejects RSET.
    test_unsupported_reset_command_get_mail_summary = \
        make_test(["+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: subject test\r\n\r\n" + \
                       "mymessage\r\n.\r\n",
                   "-ERR unknown command\r\n"],
                  ["RETR 1\r\n",
                   "RSET\r\n"],
                  lambda self: \
                      self.assertEquals(self.pop3_account.get_mail_summary(1),
                                        (u"From : user@test.com\n" + \
                                             u"Subject : subject test\n\n",
                                         u"user@test.com")))

    test_unsupported_reset_command_get_mail = \
        make_test(["+OK 10 octets\r\n" + \
                       "From: user@test.com\r\n" + \
                       "Subject: subject test\r\n\r\n" + \
                       "mymessage\r\n.\r\n",
                   "-ERR unknown command\r\n"],
                  ["RETR 1\r\n",
                   "RSET\r\n"],
                  lambda self: \
                      self.assertEquals(self.pop3_account.get_mail(1),
                                        (u"From : user@test.com\n" + \
                                             u"Subject : subject test\n\n" + \
                                             u"mymessage\n",
                                         u"user@test.com")))

    def test_get_next_mail_index_empty(self):
        """
        Test get_next_mail_index with empty mail_list parameter.
        """
        mail_list = []
        self.pop3_account.nb_mail = 0
        self.pop3_account.lastmail = 0
        result = []
        for elt in self.pop3_account.get_next_mail_index(mail_list):
            result.append(elt)
        self.assertEquals(result, [])

    def test_get_next_mail_index(self):
        """
        Test get_next_mail_index first check.
        """
        mail_list = [1, 2, 3, 4]
        self.pop3_account.nb_mail = 4
        self.pop3_account.lastmail = 0
        result = []
        for elt in self.pop3_account.get_next_mail_index(mail_list):
            result.append(elt)
        self.assertEquals(result, [1, 2, 3, 4])
        self.assertEquals(self.pop3_account.lastmail, 4)

    def test_get_next_mail_index_second_check(self):
        """
        Test get_next_mail_index second check (no parallel checking).
        """
        mail_list = [1, 2, 3, 4, 5, 6, 7, 8]
        self.pop3_account.nb_mail = 8
        self.pop3_account.lastmail = 4
        result = []
        for elt in self.pop3_account.get_next_mail_index(mail_list):
            result.append(elt)
        self.assertEquals(result, [5, 6, 7, 8])
        self.assertEquals(self.pop3_account.lastmail, 8)

    def test_get_next_mail_index_second_check_parallel_check(self):
        """
        Test get_next_mail_index second check (with parallel checking
        but not more new emails than last index jmc stopped:
        3 new emails after another client checked emails).
        """
        mail_list = [1, 2, 3]
        self.pop3_account.nb_mail = 3
        self.pop3_account.lastmail = 4
        result = []
        for elt in self.pop3_account.get_next_mail_index(mail_list):
            result.append(elt)
        self.assertEquals(result, [1, 2, 3])
        self.assertEquals(self.pop3_account.lastmail, 3)

    def test_get_next_mail_index_second_check_bug_parallel_check(self):
        """
        Test get_next_mail_index second check (with parallel checking
        but with more new emails than last index jmc stopped:
        5 new emails after another client checked emails). Cannot make
        the difference with one new email since last jmc email check!!
        """
        mail_list = [1, 2, 3, 4, 5]
        self.pop3_account.nb_mail = 5
        self.pop3_account.lastmail = 4
        result = []
        for elt in self.pop3_account.get_next_mail_index(mail_list):
            result.append(elt)
        # with no bug it should be:
        # self.assertEquals(result, [1, 2, 3, 4, 5])
        self.assertEquals(result, [5])
        self.assertEquals(self.pop3_account.lastmail, 5)

    def test_get_default_status_msg(self):
        """
        Get default status message for POP3Account.
        """
        status_msg = self.pop3_account.get_default_status_msg(Lang.en)
        self.assertEquals(status_msg, "pop3://login@localhost:1110")

    def test_get_default_status_msg_ssl(self):
        """
        Get default status message for SSL POP3Account.
        """
        self.pop3_account.ssl = True
        status_msg = self.pop3_account.get_default_status_msg(Lang.en)
        self.assertEquals(status_msg, "pop3s://login@localhost:1110")
class IMAPAccount_TestCase(InheritableAccount_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, PresenceAccount, User,
MailAccount, IMAPAccount])
self.imap_account = IMAPAccount(user=User(jid="user1@test.com"),
name="account1",
jid="account1@jmc.test.com",
login="login")
self.imap_account.password = "pass"
self.imap_account.host = "localhost"
self.imap_account.port = 1143
self.imap_account.ssl = False
self.account_class = IMAPAccount
def make_test(self, responses=None, queries=None, core=None):
def inner():
self.server = server.DummyServer("localhost", 1143)
thread.start_new_thread(self.server.serve, ())
self.server.responses = ["* OK [CAPABILITY IMAP4 LOGIN-REFERRALS " + \
"AUTH=PLAIN]\r\n", \
lambda data: "* CAPABILITY IMAP4 " + \
"LOGIN-REFERRALS AUTH=PLAIN\r\n" + \
data.split()[0] + \
" OK CAPABILITY completed\r\n", \
lambda data: data.split()[0] + \
" OK LOGIN completed\r\n"]
if responses:
self.server.responses += responses
self.server.queries = ["^[^ ]* CAPABILITY", \
"^[^ ]* LOGIN login \"pass\""]
if queries:
self.server.queries += queries
self.server.queries += ["^[^ ]* LOGOUT"]
if not self.imap_account.connected:
self.imap_account.connect()
self.failUnless(self.imap_account.connection, \
"Cannot establish connection")
if core:
model.db_connect()
core(self)
model.db_disconnect()
if self.imap_account.connected:
self.imap_account.disconnect()
self.failUnless(self.server.verify_queries())
return inner
def test_connection(self):
test_func = self.make_test()
test_func()
def test_get_mail_list_summary(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 1\r\n\r\nbody text\r\n)\r\n" + \
"* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1:20 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(),
[('1', 'mail subject 1'),
('2', 'mail subject 2')]))
test_func()
def test_get_mail_list_summary_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail_list_summary(), readonly=True)
def test_get_mail_list_summary_start_index(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
"* 3 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 3\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 2:20 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(start_index=2),
[('2', 'mail subject 2'),
('3', 'mail subject 3')]))
test_func()
def test_get_mail_list_summary_end_index(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 1\r\n\r\nbody text\r\n)\r\n" + \
"* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1:2 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(end_index=2),
[('1', 'mail subject 1'),
('2', 'mail subject 2')]))
test_func()
def test_get_new_mail_list(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT INBOX",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def __test_select_inbox_does_not_exist(self, tested_func,
exception_message="Mailbox does not exist",
readonly=False):
def check_func(self):
try:
tested_func()
except Exception, e:
self.assertEquals(str(e), exception_message)
return
self.fail("No exception raised when selecting non existing mailbox")
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" NO Mailbox does not exist\r\n"],
["^[^ ]* " + (readonly and "EXAMINE" or "SELECT") + " INBOX"],
check_func)
test_func()
def test_get_new_mail_list_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account_get_new_mail_list())
def test_get_new_mail_list_delimiter1(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test( \
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def test_get_new_mail_list_delimiter2(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "/"
test_func = self.make_test( \
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT \"?INBOX/dir1/subdir2\"?",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def test_get_mail_summary(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {12}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1 \(RFC822.header\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_summary(1),
(u"From : None\nSubject : None\n\n",
u"None")))
test_func()
def test_get_mail_summary_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail_summary(1),
"Mailbox does not exist (email 1)", True)
def test_get_new_mail_list_inbox_does_not_exist(self):
def check_func(self):
try:
self.imap_account.get_new_mail_list()
except Exception, e:
self.assertEquals(str(e), "Mailbox does not exist")
return
self.fail("No exception raised when selecting non existing mailbox")
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" NO Mailbox does not exist\r\n"],
["^[^ ]* SELECT INBOX"],
check_func)
test_func()
def test_get_mail_summary_delimiter(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {12}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* FETCH 1 \(RFC822.header\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_summary(1),
(u"From : None\nSubject : None\n\n",
u"None")))
test_func()
def test_get_mail(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {11}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1 \(RFC822\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail(1),
(u"From : None\nSubject : None\n\nbody text\r\n\n",
u"None")))
test_func()
def test_get_mail_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail(1),
"Mailbox does not exist (email 1)", True)
def test_get_mail_delimiter(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {11}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* FETCH 1 \(RFC822\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail(1),
(u"From : None\nSubject : None\n\nbody text\r\n\n",
u"None")))
test_func()
def test_build_folder_cache(self):
test_func = self.make_test(\
[lambda data: '* LIST () "." "INBOX"\r\n' + \
'* LIST () "." "INBOX.dir1"\r\n' + \
'* LIST () "." "INBOX.dir1.subdir1"\r\n' + \
'* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
'* LIST () "." "INBOX.dir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"\" \*"],
lambda self: self.assertEquals(self.imap_account._build_folder_cache(),
{"INBOX":
{"dir1":
{"subdir1": {},
"subdir2": {}},
"dir2": {}}}))
test_func()
def test_ls_dir_base(self):
self.test_build_folder_cache()
self.assertEquals(self.imap_account.ls_dir(""),
["INBOX"])
def test_ls_dir_subdir(self):
self.test_build_folder_cache()
result = self.imap_account.ls_dir("INBOX")
result.sort()
self.assertEquals(result,
["dir1", "dir2"])
def test_ls_dir_subsubdir_delim1(self):
self.test_build_folder_cache()
self.imap_account.default_delimiter = "."
result = self.imap_account.ls_dir("INBOX/dir1")
result.sort()
self.assertEquals(result,
["subdir1", "subdir2"])
def test_ls_dir_subsubdir_delim2(self):
self.test_build_folder_cache()
result = self.imap_account.ls_dir("INBOX/dir1")
result.sort()
self.assertEquals(result,
["subdir1", "subdir2"])
def test_populate_handler(self):
self.assertEquals(".", self.imap_account.delimiter)
self.imap_account.mailbox = "INBOX/dir1/subdir2"
def call_func(self):
self.imap_account.populate_handler()
self.assertEquals("INBOX.dir1.subdir2", self.imap_account.mailbox)
test_func = self.make_test(\
[lambda data: '* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def test_populate_handler_wrong_default_delimiter(self):
self.imap_account.delimiter = "/"
self.imap_account.mailbox = "INBOX/dir1/subdir2"
def call_func(self):
self.imap_account.populate_handler()
self.assertEquals("INBOX.dir1.subdir2", self.imap_account.mailbox)
self.assertEquals(".", self.imap_account.delimiter)
test_func = self.make_test(\
[lambda data: data.split()[0] + ' OK LIST completed\r\n',
lambda data: '* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX/dir1/subdir2\"? \*",
"^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def test_populate_handler_wrong_mailbox(self):
self.assertEquals(".", self.imap_account.delimiter)
self.imap_account.mailbox = "INBOX.dir1.subdir2"
def call_func(self):
try:
self.imap_account.populate_handler()
except Exception, e:
return
self.fail("Exception should have been raised")
test_func = self.make_test(\
[lambda data: data.split()[0] + ' ERR LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def check_get_next_mail_index(self, mail_list):
"""
Common tests for get_next_mail_index method.
"""
result = []
original_mail_list = [elt for elt in mail_list]
for elt in self.imap_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(mail_list, [])
self.assertEquals(result, original_mail_list)
def test_get_next_mail_index_empty(self):
"""
Test get_next_mail_index with empty mail_list parameter.
"""
mail_list = []
self.check_get_next_mail_index(mail_list)
def test_get_next_mail_index(self):
"""
Test get_next_mail_index.
"""
mail_list = [1, 2, 3, 4]
self.check_get_next_mail_index(mail_list)
def test_get_default_status_msg(self):
"""
Get default status message for IMAPAccount.
"""
status_msg = self.imap_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "imap://login@localhost:1143")
def test_get_default_status_msg_ssl(self):
"""
Get default status message for SSL IMAPAccount.
"""
self.imap_account.ssl = True
status_msg = self.imap_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "imaps://login@localhost:1143")
class AbstractSMTPAccount_TestCase(Account_TestCase):
    # Tests for AbstractSMTPAccount: default-account selection logic
    # (the "default_account" register field's post_func) and e-mail
    # construction.

    def setUp(self):
        JCLTestCase.setUp(self, tables=[Account, ExampleAccount, User,
                                        GlobalSMTPAccount, AbstractSMTPAccount])
        self.account_class = AbstractSMTPAccount

    def test_default_account_post_func_no_default_true(self):
        # No account flagged as default yet: answering "True" marks this
        # account as the default.
        user1 = User(jid="user1@test.com")
        account11 = AbstractSMTPAccount(user=user1,
                                        name="account11",
                                        jid="account11@jmc.test.com")
        account12 = AbstractSMTPAccount(user=user1,
                                        name="account12",
                                        jid="account12@jmc.test.com")
        (name, field_type, field_options, post_func, default_func) = \
            AbstractSMTPAccount.get_register_fields()[0]
        value = post_func("True", None, "user1@test.com")
        self.assertTrue(value)

    def test_default_account_post_func_no_default_false(self):
        # NOTE(review): the answer is "False" yet the expected result is
        # True -- presumably post_func forces the first account to become
        # the default when no other default exists; confirm against the
        # post_func implementation in jcl/jmc.
        user1 = User(jid="user1@test.com")
        account11 = AbstractSMTPAccount(user=user1,
                                        name="account11",
                                        jid="account11@jmc.test.com")
        account12 = AbstractSMTPAccount(user=user1,
                                        name="account12",
                                        jid="account12@jmc.test.com")
        (name, field_type, field_options, post_func, default_func) = \
            AbstractSMTPAccount.get_register_fields()[0]
        value = post_func("False", None, "user1@test.com")
        self.assertTrue(value)

    def test_default_account_post_func_true(self):
        # A default already exists (account12): answering "True" steals the
        # default flag from it.
        user1 = User(jid="user1@test.com")
        account11 = AbstractSMTPAccount(user=user1,
                                        name="account11",
                                        jid="account11@jmc.test.com")
        account12 = AbstractSMTPAccount(user=user1,
                                        name="account12",
                                        jid="account12@jmc.test.com")
        account12.default_account = True
        (name, field_type, field_options, post_func, default_func) = \
            AbstractSMTPAccount.get_register_fields()[0]
        value = post_func("True", None, "user1@test.com")
        self.assertTrue(value)
        self.assertFalse(account12.default_account)

    def test_default_account_post_func_false(self):
        # A default already exists (account12): answering "False" leaves it
        # untouched.
        user1 = User(jid="user1@test.com")
        account11 = AbstractSMTPAccount(user=user1,
                                        name="account11",
                                        jid="account11@jmc.test.com")
        account12 = AbstractSMTPAccount(user=user1,
                                        name="account12",
                                        jid="account12@jmc.test.com")
        account12.default_account = True
        (name, field_type, field_options, post_func, default_func) = \
            AbstractSMTPAccount.get_register_fields()[0]
        value = post_func("False", None, "user1@test.com")
        self.assertFalse(value)
        self.assertTrue(account12.default_account)

    def test_create_email(self):
        # create_email() fills the basic From/To/Subject headers and body.
        account11 = AbstractSMTPAccount(user=User(jid="user1@test.com"),
                                        name="account11",
                                        jid="account11@jmc.test.com")
        email = account11.create_email("from@test.com",
                                       "to@test.com",
                                       "subject",
                                       "body")
        self.assertEqual(email['From'], "from@test.com")
        self.assertEqual(email['To'], "to@test.com")
        self.assertEqual(email['Subject'], "subject")
        self.assertEqual(email.get_payload(), "body")

    def test_create_email_other_headers(self):
        # The optional dict argument adds arbitrary extra headers.
        account11 = AbstractSMTPAccount(user=User(jid="user1@test.com"),
                                        name="account11",
                                        jid="account11@jmc.test.com")
        email = account11.create_email("from@test.com",
                                       "to@test.com",
                                       "subject",
                                       "body",
                                       {"Bcc": "bcc@test.com",
                                        "Cc": "cc@test.com"})
        self.assertEqual(email['From'], "from@test.com")
        self.assertEqual(email['To'], "to@test.com")
        self.assertEqual(email['Subject'], "subject")
        self.assertEqual(email['Bcc'], "bcc@test.com")
        self.assertEqual(email['Cc'], "cc@test.com")
        self.assertEqual(email.get_payload(), "body")
class SMTPAccount_TestCase(Account_TestCase):
    def setUp(self):
        # Register the full SMTP account table hierarchy for these tests.
        JCLTestCase.setUp(self, tables=[Account, ExampleAccount, User,
                                        GlobalSMTPAccount,
                                        AbstractSMTPAccount, SMTPAccount])
        self.account_class = SMTPAccount
def make_test(self, responses=None, queries=None, core=None):
def inner():
self.server = server.DummyServer("localhost", 1025)
thread.start_new_thread(self.server.serve, ())
self.server.responses = []
if responses:
self.server.responses += responses
self.server.responses += ["221 localhost closing connection\r\n"]
self.server.queries = []
if queries:
self.server.queries += queries
self.server.queries += ["quit\r\n"]
if core:
model.db_connect()
core(self)
model.db_disconnect()
self.failUnless(self.server.verify_queries())
return inner
def test_send_email_esmtp_no_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_no_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost SMTP\r\n",
"504 ESMTP not supported\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"helo .*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN CRAM-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 ZGF4IDNmNDM2NzY0YzBhNjgyMTQ1MzhhZGNiMjE2YTYxZjRm\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH CRAM-MD5\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth_method2(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN CRAM-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 ZGF4IDNmNDM2NzY0YzBhNjgyMTQ1MzhhZGNiMjE2YTYxZjRm\r\n",
"535 Incorrect Authentication data\r\n",
"334 asd235r4\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH CRAM-MD5\r\n",
".*\r\n",
"AUTH LOGIN .*\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth_method_with_no_suitable_auth_method_error(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN DIGEST-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 asd235r4\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH LOGIN .*\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_get_default_status_msg(self):
"""
Get default status message for IMAPAccount.
"""
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
status_msg = smtp_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "smtp://user@localhost:1025")
def test_get_default_status_msg_ssl(self):
"""
Get default status message for SSL IMAPAccount.
"""
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
smtp_account.tls = True
status_msg = smtp_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "smtps://user@localhost:1025")
def suite():
    """Assemble the module-wide TestSuite covering every account test
    case defined in this file."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(AccountModule_TestCase, 'test'))
    suite.addTest(unittest.makeSuite(MailAccount_TestCase, 'test'))
    suite.addTest(unittest.makeSuite(POP3Account_TestCase, 'test'))
    suite.addTest(unittest.makeSuite(IMAPAccount_TestCase, 'test'))
    suite.addTest(unittest.makeSuite(AbstractSMTPAccount_TestCase, 'test'))
    suite.addTest(unittest.makeSuite(SMTPAccount_TestCase, 'test'))
    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| dax/jmc | src/jmc/model/tests/account.py | Python | gpl-2.0 | 54,705 | 0.007502 |
#!/usr/bin/env python
# -*- coding: utf-8
import sys
import argparse
from ete3 import Tree
import anvio.db as db
import anvio.utils as utils
import anvio.terminal as terminal
from anvio.errors import ConfigError
run = terminal.Run()
progress = terminal.Progress()

# The migration module's name encodes the schema versions,
# e.g. "v26_to_v27" -> current_version "26", next_version "27".
current_version, next_version = [x[1:] for x in __name__.split('_to_')]

# Schemas of the two tables whose newick payloads are rewritten below.
item_orders_table_name       = 'item_orders'
item_orders_table_structure  = ['name', 'type', 'data']
item_orders_table_types      = ['text', 'text', 'text']

layer_orders_table_name      = 'layer_orders'
layer_orders_table_structure = ['data_key', 'data_type', 'data_value']
layer_orders_table_types     = [ 'text'  ,    'text'  ,    'text'   ]
def migrate(db_path):
    """Upgrade an anvi'o profile database from v26 to v27.

    Every newick tree stored in the item_orders and layer_orders tables
    is parsed with ete3 (format=1) and re-serialised with format=2 (see
    the ete3 docs for the exact flavour difference), then the database
    version stamp is bumped.

    :param str db_path: path to the profile database
    :raises ConfigError: if no path is given, or the file is not a
        profile database at version 26
    """
    if db_path is None:
        raise ConfigError("No database path is given.")

    # make sure someone is not being funny
    utils.is_profile_db(db_path)

    # make sure the version is accurate
    profile_db = db.DB(db_path, None, ignore_version = True)
    if str(profile_db.get_version()) != current_version:
        raise ConfigError("Version of this profile database is not %s (hence, this script cannot really do anything)." % current_version)

    # migrate item orders: rewrite each newick-typed entry in place
    item_orders = profile_db.get_table_as_dict(item_orders_table_name)
    for order_name in item_orders:
        if item_orders[order_name]['type'] == 'newick':
            newick = Tree(item_orders[order_name]['data'], format=1)
            newick = newick.write(format=2)
            profile_db._exec("""UPDATE %s SET "data" = ? WHERE "name" LIKE ?""" % item_orders_table_name, (newick, order_name))

    # migrate layer orders: same treatment, different column names
    layer_orders = profile_db.get_table_as_dict(layer_orders_table_name)
    for order_name in layer_orders:
        if layer_orders[order_name]['data_type'] == 'newick':
            newick = Tree(layer_orders[order_name]['data_value'], format=1)
            newick = newick.write(format=2)
            profile_db._exec("""UPDATE %s SET "data_value" = ? WHERE "data_key" LIKE ?""" % layer_orders_table_name, (newick, order_name))

    # set the version
    profile_db.remove_meta_key_value_pair('version')
    profile_db.set_version(next_version)

    # bye
    profile_db.disconnect()
    progress.end()
    run.info_single('Your profile db is now %s. Aww, yisss.' % next_version, nl_after=1, nl_before=1, mc='green')
if __name__ == '__main__':
    # Minimal CLI: one positional argument, the profile database path.
    parser = argparse.ArgumentParser(description='A simple script to upgrade profile database from version %s to version %s' % (current_version, next_version))
    parser.add_argument('profile_db', metavar = 'PROFILE_DB', help = "An anvi'o profile database of version %s" % current_version)
    args, unknown = parser.parse_known_args()

    try:
        migrate(args.profile_db)
    except ConfigError as e:
        # Report the configuration problem and exit non-zero.
        print(e)
        sys.exit(-1)
| merenlab/anvio | anvio/migrations/profile/v26_to_v27.py | Python | gpl-3.0 | 2,866 | 0.007676 |
import numpy as np
import warnings
def mean_time(t, min_threshold=0, max_threshold=1253):
    """
    Take a switch probability result array from the PreAmp timer and
    compute the mean switching time, keeping only samples strictly
    inside the (min_threshold, max_threshold) window. Timing data is
    assumed to be a numpy array.

    Returns a (mean, std) tuple; both are NaN when no sample qualifies.
    """
    inside = t[(t > min_threshold) & (t < max_threshold)]
    if inside.size == 0:
        return np.nan, np.nan
    return np.mean(inside), np.std(inside)
def mean_time_diff(t, min_threshold=0, max_threshold=1253):
    """
    Take a switch probability result array from the PreAmp timers and
    compute the mean difference between the two timer channels, using
    only samples where BOTH channels fall strictly inside the
    (min_threshold, max_threshold) window.

    Returns a (mean, std) tuple; both are NaN when no pair qualifies.
    """
    def in_window(x):
        # True where the sample lies strictly inside the valid window.
        return (x > min_threshold) & (x < max_threshold)

    dt = (t[0, :] - t[1, :])[in_window(t[0, :]) & in_window(t[1, :])]
    if dt.size == 0:
        return np.nan, np.nan
    return np.mean(dt), np.std(dt)
def prob(t, min_threshold=0, max_threshold=1253):
    """
    Take a switch probability result array from the PreAmp timer and
    compute the switching probability: the fraction of samples lying
    strictly inside the (min_threshold, max_threshold) window.
    """
    n_switched = np.count_nonzero((t > min_threshold) & (t < max_threshold))
    return float(n_switched) / float(np.size(t))
def outcomes(t, min_threshold=0, max_threshold=1253):
    """
    Take a switch probability result array from the PreAmp timer, and
    convert to a numpy array of 0 or 1 based on the thresholds.

    :param t: numpy array of switching times (any shape).
    :param min_threshold: lower (exclusive) bound of the window.
    :param max_threshold: upper (exclusive) bound of the window.
    :returns: integer numpy array of the same shape, 1 where
        min_threshold < t < max_threshold, else 0.
    """
    # Vectorised boolean mask replaces the original per-element
    # np.vectorize call: same result, no Python-level loop overhead.
    return np.logical_and(t > min_threshold, t < max_threshold).astype(int)
def corr_coef_from_outcomes(outcomes):
    """
    Compute the correlation coefficient between the two channels of an
    array of switching outcomes (row 0 vs row 1). NumPy warnings (e.g.
    for a zero-variance channel) are suppressed; the result is then NaN.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        corr_matrix = np.corrcoef(outcomes[0, :], outcomes[1, :])
    return corr_matrix[0, 1]
def software_demod(t, freq, Is, Qs):
    """
    Demodulate I and Q data in software. This method uses the
    ADC frequency for demodulation.

    Input:
        t: time vector during which to demodulate data (ns).
        freq: demodulation frequency (GHz).
        Is: I data.
        Qs: Q data.
    Output:
        Id, Qd: demodulated I and Q (scalars, averaged over t).
    """
    # Rotate each (I, Q) sample by the instantaneous demodulation phase
    # and average over the time window.
    demod = 2 * np.pi * t * freq
    Sv = np.sin(demod)
    Cv = np.cos(demod)
    Id = np.mean(Is * Cv - Qs * Sv)
    Qd = np.mean(Is * Sv + Qs * Cv)
return Id, Qd | McDermott-Group/LabRAD | LabRAD/Measurements/General/data_processing.py | Python | gpl-2.0 | 2,679 | 0.008959 |
from edge import DummyEdgeEnd
from simulation_event import AbstractSimulationEvent
from stats import TripStats
class AbstractAntMove(AbstractSimulationEvent):
    """A simulation event describing one ant traversing a single edge.

    Subclasses only specialise construction/start behaviour; the core
    state machine (food pick-up, return to the anthill, choice of the
    next edge) lives in process_end().
    """
    def __init__(self, ant, origin, destination, end_time, pheromone_to_drop, trip_stats):
        self.ant = ant
        self.origin = origin
        self.destination = destination
        if self.origin is not None and self.destination is not None:
            if self.origin.edge is not None and self.destination.edge is not None:
                #print 'origin', self.origin
                #print 'destination', self.destination
                # Both ends must belong to the same edge: one move never
                # spans more than one edge.
                assert self.origin.edge == self.destination.edge
        self.end_time = end_time
        self.pheromone_to_drop = pheromone_to_drop
        self.trip_stats = trip_stats

    def process_start(self):
        # Drop pheromone at the departure end and report the touched
        # items so the caller can refresh them.
        self.origin.drop_pheromone(self.pheromone_to_drop)
        return frozenset((self.origin.edge, self.origin.point))

    def process_end(self, reality, stats):
        """Finish the traversal and schedule the ant's next move.

        Returns a (next_event, changed_items) pair.
        """
        changed = [self.destination.edge]
        self.trip_stats.edge_visited(self.destination.edge)
        self.destination.drop_pheromone(self.pheromone_to_drop)
        if not self.destination.point.is_anthill() and self.destination.point.food > 0 and not self.ant.food: # ant has found the food
            changed.append(self.destination.point)
            self.trip_stats.food_found()
            self.destination.point.food -= 1
            self.ant.food += 1
            stats.food_found(self.trip_stats)
            stats.present()
        elif self.destination.point.is_anthill(): # ant has returned to the anthill
            if self.ant.food: # with food
                changed.append(self.destination.point)
                self.destination.point.food += self.ant.food
                self.trip_stats.back_home()
                # Retire the returning ant and restart a fresh one of the
                # same class from the anthill.
                new_ant = self.ant.__class__(self.ant.world_parameters)
                return AntRestartMove(new_ant, anthill=DummyEdgeEnd(self.destination.point), end_time=reality.world.elapsed_time), frozenset(changed)
            else: # with no food
                self.trip_stats.reset_route()
        # Ask the ant for its next edge and validate the decision against
        # the world's constraints before scheduling the move.
        new_destination_edge, pheromone_to_drop = self.ant.tick(self.destination.point)
        assert new_destination_edge in (end.edge for end in self.destination.point.edge_ends), 'Illegal ant move'
        assert reality.environment_parameters.min_pheromone_dropped_by_ant <= pheromone_to_drop <= reality.environment_parameters.max_pheromone_dropped_by_ant, 'Illegal ant pheromone drop: %s' % (repr(pheromone_to_drop),)
        self.trip_stats.normal_move(new_destination_edge.cost)
        new_destination = new_destination_edge.get_other_end_by_point(self.destination.point)
        origin = new_destination_edge.get_other_end(new_destination)
        end_time = reality.world.elapsed_time + new_destination_edge.cost
        return AntMove(
            ant=self.ant,
            origin=origin,
            destination=new_destination,
            end_time=end_time,
            pheromone_to_drop=pheromone_to_drop,
            trip_stats=self.trip_stats,
        ), frozenset(changed)

    def __repr__(self):
        return '%s@%s' % (self.__class__.__name__, self.end_time,)
class AntRestartMove(AbstractAntMove):
    """Move that (re)spawns an ant at the anthill with a fresh TripStats
    and no pheromone to drop."""
    def __init__(self, ant, anthill, end_time):
        super(AntRestartMove, self).__init__(ant, None, anthill, end_time=end_time, pheromone_to_drop=0, trip_stats=TripStats())

    def process_start(self):
        # Nothing is marked dirty: the ant has not left the anthill yet.
        return frozenset()
class AntStartMove(AntRestartMove):
    """Initial spawn of an ant at simulation time zero."""
    def __init__(self, ant, anthill):
        super(AntStartMove, self).__init__(ant, anthill, end_time=0)
class AntMove(AbstractAntMove):
    """Ordinary edge traversal; all behaviour inherited from the base."""
    pass
| ppolewicz/ant-colony | antcolony/ant_move.py | Python | bsd-3-clause | 3,648 | 0.006853 |
'''
from https://docs.djangoproject.com/en/1.7/topics/auth/customizing/#specifying-a-custom-user-model
'''
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import gettext_lazy as _
from custom_user.models import User
class UserCreationForm(forms.ModelForm):
    """
    A form for creating new users. Includes all the required
    fields, plus a repeated password.
    """
    password1 = forms.CharField(label=_('Password'), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_('Password confirmation'), widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('email',)

    def clean_password2(self):
        # Check that the two password entries match; validation errors on
        # this method are attached to the password2 field by Django.
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(_("Passwords don't match"))
        return password2

    def save(self, commit=True):
        # Save the provided password in hashed format (set_password hashes;
        # assigning the raw value would store plaintext).
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """A form for updating users. Includes all the fields on
    the user, but replaces the password field with admin's
    password hash display field.
    """
    password = ReadOnlyPasswordHashField()

    class Meta:
        model = User
        fields = ('email', 'password', 'is_active', 'is_superuser')

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
class MyUserAdmin(UserAdmin):
    """Admin configuration for the custom email-based User model."""
    # The forms to add and change user instances
    form = UserChangeForm
    add_form = UserCreationForm

    # The fields to be used in displaying the User model.
    # These override the definitions on the base UserAdmin
    # that reference specific fields on auth.User.
    list_display = ('email', 'is_superuser')
    list_filter = ('is_superuser',)
    fieldsets = (
        (None, {'fields': ('email','name', 'password', 'family')}),
        ('Permissions', {'fields': ('is_superuser','is_active',)}),
        ('Settings', {'fields': ('language','receive_update_emails','receive_photo_update_emails')}),
    )
    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
    # overrides get_fieldsets to use this attribute when creating a user.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')}
        ),
    )
    search_fields = ('email',)
    ordering = ('email',)
    filter_horizontal = ()
    # Use a raw-id widget for the family FK to avoid loading every family
    # into a select box.
    raw_id_fields = ('family',)
# Register the custom User model with its tailored admin class.
admin.site.register(User, MyUserAdmin)
| JustinWingChungHui/okKindred | custom_user/admin.py | Python | gpl-2.0 | 3,150 | 0.00381 |
# coding: utf-8
__author__ = "@strizhechenko"
import sys
from morpher import Morpher
from twitterbot_utils import Twibot
from apscheduler.schedulers.blocking import BlockingScheduler
# Module-level singletons shared by the scheduled jobs below.
sched = BlockingScheduler()
bot = Twibot()
morphy = Morpher()
def tweets2words(tweets):
    """Join the texts of the given tweets and split the result into
    words via the module-level Morpher instance."""
    string = " ".join([tweet.text for tweet in tweets])
    return morphy.process_to_words(string)
@sched.scheduled_job('interval', minutes=15)
def do_tweets():
    """Every 15 minutes: harvest words from the configured Twitter list
    and post one generated phrase per word."""
    print 'New tick'
    words = tweets2words(bot.fetch_list(list_id=217926157))
    for word in words:
        tweet = morphy.word2phrase(word)
        bot.tweet(tweet)
        print 'post', tweet.encode('utf-8')
@sched.scheduled_job('interval', hours=24)
def do_wipe():
    """Once a day: delete the bot's previously posted tweets."""
    print 'Wipe time'
    bot.wipe()
if __name__ == '__main__':
    # Run one batch immediately; with --test exit before entering the
    # blocking scheduler loop.
    do_tweets()
    if '--test' in sys.argv:
        exit(0)
    sched.start()
| strizhechenko/twitterbots | memes_zaebali.py | Python | gpl-3.0 | 857 | 0.001167 |
"""Modify Group Entry Message."""
from enum import IntEnum
from pyof.foundation.base import GenericMessage
from pyof.foundation.basic_types import (
FixedTypeList, Pad, UBInt8, UBInt16, UBInt32)
from pyof.v0x04.common.header import Header, Type
from pyof.v0x04.controller2switch.common import Bucket
# Public API of this module.
__all__ = ('GroupMod', 'GroupModCommand', 'GroupType', 'Group',
           'ListOfBuckets')
class Group(IntEnum):
    """Group numbering. Groups can use any number up to :attr:`OFPG_MAX`."""

    #: Last usable group number.
    OFPG_MAX = 0xffffff00
    #: Fake groups.
    #: Represents all groups for group delete commands.
    OFPG_ALL = 0xfffffffc
    #: Wildcard group used only for flow stats requests.
    #: Select all flows regardless of group (including flows with no group).
    OFPG_ANY = 0xffffffff
class GroupModCommand(IntEnum):
    """Group commands (the OFPGC_* constants used in GroupMod.command)."""

    #: New group.
    OFPGC_ADD = 0
    #: Modify all matching groups.
    OFPGC_MODIFY = 1
    #: Delete all matching groups.
    OFPGC_DELETE = 2
class GroupType(IntEnum):
    """Group types. Range [128, 255] is reserved for experimental use."""

    #: All (multicast/broadcast) group.
    OFPGT_ALL = 0
    #: Select group.
    OFPGT_SELECT = 1
    #: Indirect group.
    OFPGT_INDIRECT = 2
    #: Fast failover group.
    OFPGT_FF = 3
class ListOfBuckets(FixedTypeList):
    """List of buckets.

    Represented by instances of Bucket; any other item type is rejected
    by the FixedTypeList machinery.
    """

    def __init__(self, items=None):
        """Create a ListOfBuckets with the optional parameters below.

        Args:
            items (Bucket): Instance or a list of instances.
        """
        super().__init__(pyof_class=Bucket, items=items)
class GroupMod(GenericMessage):
    """Group setup and teardown (controller -> datapath)."""

    header = Header(message_type=Type.OFPT_GROUP_MOD)
    # One of the OFPGC_* commands.
    command = UBInt16(enum_ref=GroupModCommand)
    # One of the OFPGT_* group types.
    group_type = UBInt8()
    #: Pad to 64 bits.
    pad = Pad(1)
    group_id = UBInt32()
    buckets = ListOfBuckets()

    def __init__(self, xid=None, command=None, group_type=None, group_id=None,
                 buckets=None):
        """Create a GroupMod with the optional parameters below.

        Args:
            xid (int): Header's transaction id. Defaults to random.
            command (GroupModCommand): One of OFPGC_*.
            group_type (GroupType): One of OFPGT_*.
            group_id (int): Group identifier.
            buckets (:class:`ListOfBuckets`): The length of the bucket
                array is inferred from the length field in the header.
        """
        super().__init__(xid)
        self.command = command
        self.group_type = group_type
        self.group_id = group_id
        self.buckets = buckets
| cemsbr/python-openflow | pyof/v0x04/controller2switch/group_mod.py | Python | mit | 2,734 | 0 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2015 GEM Foundation
#
# The Catalogue Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# with this download. If not, see <http://www.gnu.org/licenses/>
#!/usr/bin/env/python
"""
Collection of Catalogue Database Query Tools
"""
import h5py
import re
import numpy as np
import pandas as pd
from copy import copy, deepcopy
from datetime import datetime, date, time
from collections import OrderedDict
import matplotlib
import matplotlib.dates as mdates
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize, LogNorm
import eqcat.utils as utils
from eqcat.regression_models import function_map
from matplotlib.path import Path
from scipy import odr
from eqcat.isf_catalogue import (Magnitude, Location, Origin,
Event, ISFCatalogue)
try:
    from mpl_toolkits.basemap import Basemap
except Exception:
    # Catch import-time failures only at the Exception level; the
    # original bare "except:" also swallowed KeyboardInterrupt and
    # SystemExit, which should never be silenced here.
    print("Basemap not installed or unavailable!")
    print("Catalogue Plotting Functions will not work")
# Enlarge axis tick labels for all subsequent plots.
matplotlib.rc("xtick", labelsize=14)
matplotlib.rc("ytick", labelsize=14)
# Embed fonts as TrueType (fonttype 42) in PDF/PS output so text stays
# selectable/editable (the matplotlib default, Type 3, is often rejected
# by publishers). useafm makes PS output use core AFM fonts.
matplotlib.rcParams["pdf.fonttype"] = 42
matplotlib.rcParams["ps.fonttype"] = 42
matplotlib.rcParams["ps.useafm"] = True
class CatalogueDB(object):
    """
    Holder class for the catalogue database.

    The catalogue is kept as two pandas DataFrames: ``origins`` (one row
    per origin estimate) and ``magnitudes`` (one row per magnitude
    estimate), linked through their ``eventID`` columns.
    """
    def __init__(self, filename=None):
        """
        Instantiate the class. If a filename is supplied this will load the
        data from the file.

        :param str filename:
            Path to input file (HDF5 store containing the
            "catalogue/origins" and "catalogue/magnitudes" tables)
        """
        self.filename = filename
        self.origins = []
        self.magnitudes = []
        self.number_origins = None
        self.number_magnitudes = None
        self.load_data_from_file()

    def load_data_from_file(self):
        """
        If a filename is specified then will import data from file
        """
        if self.filename:
            self.origins = pd.read_hdf(self.filename, "catalogue/origins")
            self.magnitudes = pd.read_hdf(self.filename,
                                          "catalogue/magnitudes")
            _ = self._get_number_origins_magnitudes()
        else:
            pass

    def _get_number_origins_magnitudes(self):
        """
        Returns the number of origins and the number of magnitudes,
        refreshing the cached counters as a side effect.
        """
        self.number_origins = len(self.origins)
        self.number_magnitudes = len(self.magnitudes)
        return self.number_origins, self.number_magnitudes

    def export_current_selection(self, output_file):
        """
        Exports the current selection to an HDF5 file.
        """
        store = pd.HDFStore(output_file)
        store.append("catalogue/origins", self.origins)
        store.append("catalogue/magnitudes", self.magnitudes)
        store.close()

    def build_isf(self, identifier, name):
        """
        Creates an instance of the ISFCatalogue class from the hdf5 format

        :param str identifier:
            Identifier string of the ISFCatalogue object
        :param str name:
            Name for the ISFCatalogue object
        :returns:
            Catalogue as instance of :class: ISFCatalogue
        """
        isf_catalogue = ISFCatalogue(identifier, name)
        event_groups = self.origins.groupby("eventID")
        mag_groups = self.magnitudes.groupby("eventID")
        mag_keys = list(mag_groups.indices.keys())
        ngrps = len(event_groups)
        for iloc, grp in enumerate(event_groups):
            # Progress indicator for large catalogues.
            if (iloc % 1000) == 0:
                print("Processing event %d of %d" % (iloc, ngrps))
            # Get magnitudes list
            if grp[0] in mag_keys:
                # Magnitudes associated to this origin
                mag_list = self._get_magnitude_classes(
                    mag_groups.get_group(grp[0]))
            else:
                mag_list = []
            # Get origins
            origin_list = self._get_origin_classes(grp[1], mag_list)
            event = Event(grp[0], origin_list, mag_list)
            isf_catalogue.events.append(event)
        return isf_catalogue

    def _get_origin_classes(self, orig_group, mag_list):
        """
        Gets the Origin class representation for a particular event

        :param orig_group:
            Pandas Group object
        :param list:
            List of :class: Magnitude objects
        """
        origin_list = []
        norig = orig_group.shape[0]
        for iloc in range(0, norig):
            # Get location (with the 90% error ellipse parameters)
            location = Location(orig_group.originID.values[iloc],
                                orig_group.longitude.values[iloc],
                                orig_group.latitude.values[iloc],
                                orig_group.depth.values[iloc],
                                orig_group.semimajor90.values[iloc],
                                orig_group.semiminor90.values[iloc],
                                orig_group.error_strike.values[iloc],
                                orig_group.depth_error.values[iloc])
            # origin
            orig_date = date(orig_group.year.values[iloc],
                             orig_group.month.values[iloc],
                             orig_group.day.values[iloc])
            # Split seconds into integer seconds + microseconds.
            micro_seconds = (orig_group.second.values[iloc] -
                             np.floor(orig_group.second.values[iloc])) * 1.0E6
            seconds = int(orig_group.second.values[iloc])
            if seconds > 59:
                # Clamp out-of-range seconds and carry one minute.
                # NOTE(review): minute + 1 can itself reach 60 and make
                # time() raise - confirm upstream data never has
                # second > 59 in the last minute of an hour.
                seconds = 0
                minute_inc = 1
            else:
                minute_inc = 0
            orig_time = time(orig_group.hour.values[iloc],
                             orig_group.minute.values[iloc] + minute_inc,
                             seconds,
                             int(micro_seconds))
            origin = Origin(orig_group.originID.values[iloc],
                            orig_date,
                            orig_time,
                            location,
                            orig_group.Agency.values[iloc],
                            is_prime=bool(orig_group.prime.values[iloc]),
                            time_error = orig_group.time_error.values[iloc])
            # Attach the magnitudes reported for this particular origin.
            for mag in mag_list:
                if mag.origin_id == origin.id:
                    origin.magnitudes.append(mag)
            origin_list.append(origin)
        return origin_list

    def _get_magnitude_classes(self, mag_group):
        """
        For a given event, returns the list of magnitudes

        :param mag_group:
            Group of magnitudes for a given event as instance of Pandas
            Group object
        """
        mag_list = []
        nmags = mag_group.shape[0]
        for iloc in range(0, nmags):
            mag = Magnitude(mag_group.eventID.values[iloc],
                            mag_group.originID.values[iloc],
                            mag_group.value.values[iloc],
                            mag_group.magAgency.values[iloc],
                            mag_group.magType.values[iloc],
                            mag_group.sigma.values[iloc])
            mag.magnitude_id = mag_group.magnitudeID.values[iloc]
            mag_list.append(mag)
        return mag_list
class CatalogueSelector(object):
    """
    Tool to select sub-sets of the catalogue.

    Selections come in two flavours controlled by ``create_copy``: when
    True (default) every select_* method returns a new
    :class:`CatalogueDB` and leaves the wrapped catalogue untouched;
    when False the selection is applied in place and the wrapped
    catalogue itself is returned.
    """
    def __init__(self, catalogue, create_copy=True):
        """
        :param catalogue: catalogue as instance of :class:`CatalogueDB`
        :param bool create_copy: return copies (True) or filter in place
        """
        self.catalogue = catalogue
        self.copycat = create_copy

    def _select_by_origins(self, idx, select_type="any"):
        """
        Returns a catalogue selected from the original catalogue by
        origin.

        :param idx:
            Boolean pandas Series (aligned with ``origins``) flagging
            the origins that satisfy the selection
        :param str select_type:
            "all" keeps only the flagged origin rows (plus the
            magnitudes of the corresponding events); "any" keeps every
            origin and magnitude belonging to an event with at least one
            flagged origin
        """
        if select_type == "all":
            output_catalogue = CatalogueDB()
            output_catalogue.origins = self.catalogue.origins[idx]
            output_catalogue.magnitudes = self.catalogue.magnitudes[
                self.catalogue.magnitudes["eventID"].isin(
                    output_catalogue.origins["eventID"].unique())]
            return output_catalogue
        if not select_type == "any":
            raise ValueError(
                "Selection Type must correspond to 'any' or 'all'")
        valid_origins = self.catalogue.origins.eventID[idx]
        event_list = valid_origins.unique()
        select_idx1 = self.catalogue.origins.eventID.isin(event_list)
        select_idx2 = self.catalogue.magnitudes.eventID.isin(event_list)
        if self.copycat:
            output_catalogue = CatalogueDB()
            output_catalogue.origins = self.catalogue.origins[select_idx1]
            output_catalogue.magnitudes = \
                self.catalogue.magnitudes[select_idx2]
            # Refresh the cached counters (the original referenced the
            # method without calling it).
            output_catalogue._get_number_origins_magnitudes()
            return output_catalogue
        # In-place mode: filter the wrapped catalogue and return it (the
        # original fell through to an unbound variable -> NameError).
        self.catalogue.origins = self.catalogue.origins[select_idx1]
        self.catalogue.magnitudes = self.catalogue.magnitudes[select_idx2]
        self.catalogue._get_number_origins_magnitudes()
        return self.catalogue

    def _select_by_magnitudes(self, idx, select_type="any"):
        """
        Returns a catalogue selected from the original catalogue by
        magnitude. See :meth:`_select_by_origins` for ``select_type``.
        """
        if select_type == "all":
            output_catalogue = CatalogueDB()
            output_catalogue.magnitudes = self.catalogue.magnitudes[idx]
            output_catalogue.origins = self.catalogue.origins[
                self.catalogue.origins["eventID"].isin(
                    output_catalogue.magnitudes["eventID"].unique())]
            return output_catalogue
        if not select_type == "any":
            raise ValueError(
                "Selection Type must correspond to 'any' or 'all'")
        valid_mags = self.catalogue.magnitudes.eventID[idx]
        event_list = valid_mags.unique()
        select_idx1 = self.catalogue.magnitudes.eventID.isin(event_list)
        select_idx2 = self.catalogue.origins.eventID.isin(event_list)
        if self.copycat:
            output_catalogue = CatalogueDB()
            output_catalogue.magnitudes = \
                self.catalogue.magnitudes[select_idx1]
            output_catalogue.origins = self.catalogue.origins[select_idx2]
            output_catalogue._get_number_origins_magnitudes()
            return output_catalogue
        # In-place mode (also fixes the original's "magnitude" attribute
        # typo and the unbound return value).
        self.catalogue.magnitudes = self.catalogue.magnitudes[select_idx1]
        self.catalogue.origins = self.catalogue.origins[select_idx2]
        self.catalogue._get_number_origins_magnitudes()
        return self.catalogue

    def select_by_agency(self, agency, select_type="any"):
        """
        Selects the origins (and their events) reported by an agency.
        """
        idx = self.catalogue.origins.Agency == agency
        return self._select_by_origins(idx, select_type)

    def limit_to_agency(self, agency, mag_agency=None):
        """
        Limits the catalogue to just those origins and magnitudes
        reported by the specific agency.

        :param str agency: origin agency code
        :param str mag_agency: magnitude agency code; defaults to agency
        """
        if not mag_agency:
            mag_agency = agency
        select_idx1 = self.catalogue.magnitudes.magAgency == mag_agency
        select_idx2 = self.catalogue.origins.Agency == agency
        if self.copycat:
            output_catalogue = CatalogueDB()
            output_catalogue.magnitudes = \
                self.catalogue.magnitudes[select_idx1]
            output_catalogue.origins = self.catalogue.origins[select_idx2]
            output_catalogue._get_number_origins_magnitudes()
            return output_catalogue
        self.catalogue.magnitudes = self.catalogue.magnitudes[select_idx1]
        self.catalogue.origins = self.catalogue.origins[select_idx2]
        self.catalogue._get_number_origins_magnitudes()
        return self.catalogue

    def select_within_depth_range(self, upper_depth=None, lower_depth=None,
                                  select_type="any"):
        """
        Selects origins within a depth range (upper_depth <= depth <=
        lower_depth); origins with missing depth are excluded.
        """
        if not upper_depth:
            upper_depth = 0.0
        if not lower_depth:
            lower_depth = np.inf
        idx = (self.catalogue.origins["depth"] >= upper_depth) &\
            (self.catalogue.origins["depth"] <= lower_depth) &\
            (self.catalogue.origins["depth"].notnull())
        return self._select_by_origins(idx, select_type)

    def select_within_magnitude_range(self, lower_mag=None, upper_mag=None,
                                      select_type="any"):
        """
        Selects magnitudes within the inclusive range
        [lower_mag, upper_mag].
        """
        if not lower_mag:
            lower_mag = -np.inf
        if not upper_mag:
            upper_mag = np.inf
        idx = (self.catalogue.magnitudes["value"] >= lower_mag) &\
            (self.catalogue.magnitudes["value"] <= upper_mag)
        return self._select_by_magnitudes(idx, select_type)

    def select_within_polygon(self, poly_lons, poly_lats, select_type="any"):
        """
        Selects origins inside the polygon defined by the vertex arrays.
        """
        polypath = Path(np.column_stack([poly_lons, poly_lats]))
        # Build the boolean mask with the origins' own index: a bare
        # pd.Series would carry a fresh RangeIndex and mis-align with
        # origins filtered by a previous selection.
        idx = pd.Series(
            polypath.contains_points(np.column_stack([
                self.catalogue.origins["longitude"].values,
                self.catalogue.origins["latitude"].values])),
            index=self.catalogue.origins.index)
        return self._select_by_origins(idx, select_type)

    def select_within_bounding_box(self, bounds, select_type="any"):
        """
        Selects origins within a bounding box given as
        (llon, llat, ulon, ulat).
        """
        llon = bounds[0]
        ulon = bounds[2]
        llat = bounds[1]
        ulat = bounds[3]
        bbox = np.array([[llon, ulat],
                         [ulon, ulat],
                         [ulon, llat],
                         [llon, llat]])
        return self.select_within_polygon(bbox[:, 0], bbox[:, 1], select_type)

    def select_within_date_range(self, start_date=None, end_date=None,
                                 select_type="any"):
        """
        Selects origins within an inclusive range of years.
        """
        if not start_date:
            start_date = 0
        if not end_date:
            end_date = 2015
        idx = (self.catalogue.origins["year"] >= start_date) &\
            (self.catalogue.origins["year"] <= end_date)
        return self._select_by_origins(idx, select_type)
def get_agency_origin_count(catalogue):
    """
    Returns a list of (agency, number of origins) tuples, ordered from the
    most common to the least common agency.

    :param catalogue: Catalogue-like object whose ``origins`` DataFrame
        contains an "Agency" column
    :returns: list of (agency, count) tuples
    """
    # value_counts() sorts in descending order of frequency and items()
    # preserves that order. The previous positional lookup on a
    # string-indexed Series (series[iloc]) is deprecated/broken in
    # modern pandas.
    return list(catalogue.origins["Agency"].value_counts().items())
def get_agency_magnitude_count(catalogue):
    """
    Returns a list of (agency, number of magnitudes) tuples, ordered from
    the most common to the least common agency.

    :param catalogue: Catalogue-like object whose ``magnitudes`` DataFrame
        contains a "magAgency" column
    :returns: list of (agency, count) tuples
    """
    # value_counts() sorts in descending order of frequency and items()
    # preserves that order. The previous positional lookup on a
    # string-indexed Series (series[iloc]) is deprecated/broken in
    # modern pandas.
    return list(catalogue.magnitudes["magAgency"].value_counts().items())
def get_agency_magtype_statistics(catalogue, pretty_print=True):
    """
    Returns an analysis of the number of different magnitude types found
    for each agency.

    :param catalogue: Instance of the CatalogueDB class
    :param bool pretty_print: Print the per-agency magnitude counts
    :returns: OrderedDict mapping agency to
        {"Origins": n_origins, "Magnitudes": {mag_type: count}}
    """
    agency_count = get_agency_origin_count(catalogue)
    mag_group = catalogue.magnitudes.groupby("magAgency")
    mag_group_keys = list(mag_group.groups.keys())
    output = []
    for agency, n_origins in agency_count:
        print("Agency: %s - %d Origins" % (agency, n_origins))
        if not agency in mag_group_keys:
            print("No magnitudes corresponding to this agency")
            print("=" * 40)
            continue
        grp1 = mag_group.get_group(agency)
        # Materialise the counts as a list: the previous implementation
        # wrapped them in an iterator which the pretty-printing join
        # consumed, so the returned "Magnitudes" dictionaries were always
        # empty when pretty_print=True
        mag_counts = list(grp1["magType"].value_counts().items())
        if pretty_print:
            print("%s" % " | ".join(["{:s} ({:d})".format(val[0], val[1])
                                     for val in mag_counts]))
            print("=" * 40)
        agency_dict = {"Origins": n_origins, "Magnitudes": dict(mag_counts)}
        output.append((agency, agency_dict))
    return OrderedDict(output)
def get_agency_magtype_statistics_with_agency_code(catalogue,
                                                   agency_dict=None,
                                                   pretty_print=True):
    """
    Returns an analysis of the number of different magnitude types found
    for each agency, annotating each agency with its name and country from
    the supplied agency code dictionary.

    :param catalogue: Instance of the CatalogueDB class
    :param dict agency_dict: Mapping of agency code to a dictionary with
        "name" and "country" entries (optional)
    :param bool pretty_print: Print the per-agency magnitude counts
    :returns: OrderedDict mapping agency to
        {"Origins": n_origins, "Magnitudes": {mag_type: count}}
    """
    agency_count = get_agency_origin_count(catalogue)
    mag_group = catalogue.magnitudes.groupby("magAgency")
    mag_group_keys = list(mag_group.groups.keys())
    output = []
    agency_codes = agency_dict if agency_dict else {}
    for agency, n_origins in agency_count:
        # Reset for every agency: previously a missing code re-used the
        # name/country of the preceding agency
        agency_name = []
        agency_country = []
        # dict.iteritems() does not exist in Python 3 - use items()
        for key, value in sorted(agency_codes.items()):
            if key == agency:
                agency_name = value.get('name')
                agency_country = value.get('country')
        print("Agency: %s - %s - %s " % (agency, agency_name,
                                         agency_country))
        print("Origins: %d " % (n_origins))
        if not agency in mag_group_keys:
            print("No magnitudes corresponding to this agency")
            print("=" * 40)
        continue
        grp1 = mag_group.get_group(agency)
        # Materialise the counts: Series.iteritems() is gone in Python 3,
        # and the old iterator was exhausted by the pretty-printing join,
        # leaving the returned "Magnitudes" dictionary empty
        mag_counts = list(grp1["magType"].value_counts().items())
        if pretty_print:
            print("%s" % " | ".join(["{:s} ({:d})".format(val[0], val[1])
                                     for val in mag_counts]))
            print("=" * 40)
        # Local name: re-binding the agency_dict parameter here would
        # shadow the codes mapping for later iterations
        info = {"Origins": n_origins, "Magnitudes": dict(mag_counts)}
        output.append((agency, info))
    return OrderedDict(output)
def get_agency_magnitude_pairs(catalogue, pair1, pair2, no_case=False):
    """
    Returns a set of vectors corresponding to the common magnitudes
    recorded by an (Agency, Magnitude Type) pair.

    :param catalogue:
        Instance of the CatalogueDB class
    :param tuple pair1:
        Agency and magnitude combination (Agency, Magnitude Type) for defining
        the independent variable
    :param tuple pair2:
        Agency and magnitude combination (Agency, Magnitude Type) for defining
        the dependent variable
    :param bool no_case:
        Makes the selection case sensitive (True) or ignore case (False)
    :returns:
        Tuple of (OrderedDict of magnitude and sigma vectors keyed as
        "Type(Agency)" / "Type(Agency) Sigma", catalogue of the common
        events), or (None, None) when either pair matches no magnitudes or
        the two selections share no events
    """
    if no_case:
        # Case-insensitive comparison of both agency and magnitude type
        case1_select = (
            catalogue.magnitudes["magAgency"].str.lower() == pair1[0].lower()
        ) &\
            (catalogue.magnitudes["magType"].str.lower() == pair1[1].lower())
        case2_select = (
            catalogue.magnitudes["magAgency"].str.lower() == pair2[0].lower()
        ) &\
            (catalogue.magnitudes["magType"].str.lower() == pair2[1].lower())
    else:
        case1_select = (catalogue.magnitudes["magAgency"] == pair1[0]) &\
            (catalogue.magnitudes["magType"] == pair1[1])
        case2_select = (catalogue.magnitudes["magAgency"] == pair2[0]) &\
            (catalogue.magnitudes["magType"] == pair2[1])
    if not np.any(case1_select):
        print("Agency-Pair: (%s, %s) returned no magnitudes" %(pair1[0],
                                                               pair1[1]))
        return None, None
    if not np.any(case2_select):
        print("Agency-Pair: (%s, %s) returned no magnitudes" %(pair2[0],
                                                               pair2[1]))
        return None, None
    select_cat1 = catalogue.magnitudes[case1_select]
    select_cat2 = catalogue.magnitudes[case2_select]
    # See if any eventIDs in the second catalogues are in the first
    idx = select_cat2.eventID.isin(select_cat1.eventID)
    # NOTE(review): num_events is never used below; np.sum(idx) is
    # recomputed inside the print statement
    num_events = np.sum(idx)
    if np.any(idx):
        print("Agency-Pairs: (%s, %s) & (%s, %s) returned %d events" % (
            pair1[0], pair1[1], pair2[0], pair2[1], np.sum(idx)))
    else:
        # No common events
        print("Agency-Pairs: (%s, %s) & (%s, %s) returned 0 events" % (
            pair1[0], pair1[1], pair2[0], pair2[1]))
        return None, None
    common_catalogue = select_cat2[idx]
    cat1_groups = select_cat1.groupby("eventID")
    # Paired vectors: mag1/sigma1 from the pair1 selection, mag2/sigma2
    # from the pair2 selection, aligned by eventID
    mag1 = []
    sigma1 = []
    mag2 = []
    sigma2 = []
    for i, grp in common_catalogue.groupby("eventID"):
        if len(grp) > 1:
            # Find the event with the largest magnitude - some truncation
            # may be occurring
            mloc = np.argmax(grp["value"].values)
            event0 = grp.iloc[mloc]
        else:
            event0 = grp.iloc[0]
        mag2.append(event0.value)
        sigma2.append(event0.sigma)
        event1 = cat1_groups.get_group(event0.eventID)
        if len(event1) > 1:
            # Also find the event with the largest magnitude
            # NOTE(review): this selects the row with the largest originID,
            # not the largest magnitude "value" as the comment suggests -
            # confirm whether "value" was intended here
            event1 = event1.iloc[np.argmax(event1["originID"].values)]
            mag1.append(event1.value.tolist())
            sigma1.append(event1.sigma.tolist())
        else:
            mag1.extend(event1.value.tolist())
            sigma1.extend(event1.sigma.tolist())
    # Build the catalogue restricted to the common events
    output_catalogue = CatalogueDB()
    output_catalogue.origins = catalogue.origins[
        catalogue.origins.eventID.isin(common_catalogue.eventID)]
    output_catalogue.magnitudes = catalogue.magnitudes[
        catalogue.magnitudes.eventID.isin(common_catalogue.eventID)]
    _, _ = output_catalogue._get_number_origins_magnitudes()
    pair_1_key = "{:s}({:s})".format(pair1[1],pair1[0])
    pair_2_key = "{:s}({:s})".format(pair2[1],pair2[0])
    return OrderedDict([
        (pair_1_key, np.array(mag1)),
        (pair_1_key + " Sigma", np.array(sigma1)),
        (pair_2_key, np.array(mag2)),
        (pair_2_key + " Sigma", np.array(sigma2))]), output_catalogue
def mine_agency_magnitude_combinations(catalogue, agency_mag_data, threshold,
                                       no_case=False):
    """
    Return list of possible agency and magnitude combinations that would
    exceed a threshold number of points.

    :param catalogue: Instance of the CatalogueDB class
    :param dict agency_mag_data: Output of get_agency_magtype_statistics,
        i.e. {agency: {"Origins": n, "Magnitudes": {mag_type: count}}}
    :param int threshold: Minimum number of common events for a pair to be
        retained
    :param bool no_case: Ignore case in the agency/magnitude selection
    :returns: OrderedDict mapping "key1|key2" to the corresponding query
        result
    """
    results_dict = []
    for iloc, agency_1 in enumerate(agency_mag_data):
        for mag_1 in agency_mag_data[agency_1]["Magnitudes"]:
            if agency_mag_data[agency_1]["Magnitudes"][mag_1] < threshold:
                continue
            for agency_2 in list(agency_mag_data.keys())[iloc:]:
                for mag_2 in agency_mag_data[agency_2]["Magnitudes"]:
                    if (agency_1 == agency_2) and (mag_1 == mag_2):
                        # Redundant
                        continue
                    if agency_mag_data[agency_2]["Magnitudes"][mag_2] <\
                            threshold:
                        # Skip
                        continue
                    print("Trying: (%s, %s) and (%s, %s)" % (agency_1, mag_1,
                                                             agency_2, mag_2))
                    data, _ = get_agency_magnitude_pairs(catalogue,
                                                         (agency_1, mag_1),
                                                         (agency_2, mag_2),
                                                         no_case)
                    if data:
                        # Report number of values. dict_keys is not
                        # subscriptable under Python 3 - materialise
                        # as a list first
                        data_keys = list(data.keys())
                        npairs = len(data[data_keys[0]])
                        if npairs > threshold:
                            results_dict.append(
                                ("|".join([data_keys[0], data_keys[2]]),
                                 data))
                    else:
                        print("----> No pairs found!")
    return OrderedDict(results_dict)
def mine_agency_magnitude_combinations_to_file(output_file, catalogue,
        agency_mag_data, threshold, no_case=False):
    """
    As :func:`mine_agency_magnitude_combinations`, additionally storing
    every retained pair in an hdf5 file as an (npairs, 4) dataset of
    [mag1, sigma1, mag2, sigma2] keyed by "key1|key2".

    :param str output_file: Path to the hdf5 output file (opened in
        append mode)
    :param catalogue: Instance of the CatalogueDB class
    :param dict agency_mag_data: Output of get_agency_magtype_statistics
    :param int threshold: Minimum number of common events for a pair to be
        retained
    :param bool no_case: Ignore case in the agency/magnitude selection
    :returns: OrderedDict mapping "key1|key2" to the corresponding query
        result (previously the collected results were silently discarded)
    """
    results_dict = []
    fle = h5py.File(output_file, "a")
    for iloc, agency_1 in enumerate(agency_mag_data):
        for mag_1 in agency_mag_data[agency_1]["Magnitudes"]:
            if agency_mag_data[agency_1]["Magnitudes"][mag_1] < threshold:
                continue
            for agency_2 in list(agency_mag_data.keys())[iloc:]:
                for mag_2 in agency_mag_data[agency_2]["Magnitudes"]:
                    if (agency_1 == agency_2) and (mag_1 == mag_2):
                        # Redundant
                        continue
                    if agency_mag_data[agency_2]["Magnitudes"][mag_2] <\
                            threshold:
                        # Skip
                        continue
                    print("Trying: (%s, %s) and (%s, %s)" % (agency_1, mag_1,
                                                             agency_2, mag_2))
                    data, _ = get_agency_magnitude_pairs(catalogue,
                                                         (agency_1, mag_1),
                                                         (agency_2, mag_2),
                                                         no_case)
                    if data:
                        # Report number of values
                        data_keys = list(data.keys())
                        npairs = len(data[data_keys[0]])
                        if npairs > threshold:
                            combo_key = "|".join([data_keys[0],
                                                  data_keys[2]])
                            results_dict.append((combo_key, data))
                            dset = fle.create_dataset(combo_key,
                                                      (npairs, 4),
                                                      dtype="f")
                            dset[:] = np.column_stack([data[data_keys[0]],
                                                       data[data_keys[1]],
                                                       data[data_keys[2]],
                                                       data[data_keys[3]]])
                    else:
                        print("----> No pairs found!")
    fle.close()
    return OrderedDict(results_dict)
def join_query_results(data1, data2):
    """
    Joins the results of two magnitude-agency queries into a single
    ordered dictionary, concatenating the data vectors pairwise by key
    position. When only one query produced data the other is returned
    unchanged; when neither did, None is returned.
    """
    if not data1:
        return data2 if data2 else None
    if not data2:
        return data1
    keys2 = list(data2.keys())
    joined = OrderedDict()
    for pos, key1 in enumerate(data1):
        key2 = keys2[pos]
        # Keep the key unchanged when both queries used the same label,
        # otherwise record both labels
        joint_key = key1 if key1 == key2 else (key1 + " & " + key2)
        joined[joint_key] = np.hstack([data1[key1], data2[key2]])
    return joined
def plot_agency_magnitude_pair(data, overlay=False, xlim=[], ylim=[],
        marker="o", figure_size=(7, 8), filetype="png", resolution=300,
        filename=None):
    """
    Plots the agency magnitude pair

    :param dict data:
        Query result for a particular joint agency-magnitude pair combination
    :param bool overlay:
        Allows another layer to be rendered on top (True) or closes the figure
        for plotting (False)
    :param list xlim:
        Lower and upper bounds for x-axis
    :param list ylim:
        Lower and upper bounds for y-axis
    :param str marker: Matplotlib marker style for the data points
    :param tuple figure_size: Figure size in inches
    :param str filetype: Image format used when a filename is given
    :param int resolution: Image resolution (dpi) used when saving
    :param filename: If given, the figure is saved to this path
    :returns: The input data (None when data is empty)
    """
    if not data:
        print("No pairs found - abandoning plot!")
        return
    fig = plt.figure(figsize=figure_size)
    keys = list(data.keys())
    # x = keys[0]/keys[1] (magnitude/sigma), y = keys[2]/keys[3]
    plt.errorbar(data[keys[0]], data[keys[2]],
                 xerr=data[keys[1]], yerr=data[keys[3]],
                 marker=marker, mfc="b", mec="k", ls="None",
                 ecolor="r")
    plt.xlabel(utils._to_latex(keys[0]), fontsize=16)
    plt.ylabel(utils._to_latex(keys[2]), fontsize=16)
    plt.grid(True)
    # Determine per-axis bounds (explicit limits take precedence over
    # the data range)
    if len(xlim) == 2:
        lowx = xlim[0]
        highx = xlim[1]
    else:
        lowx = np.floor(np.min(data[keys[0]]))
        highx = np.ceil(np.max(data[keys[0]]))
    if len(ylim) == 2:
        lowy = ylim[0]
        highy = ylim[1]
    else:
        lowy = np.floor(np.min(data[keys[2]]))
        highy = np.ceil(np.max(data[keys[2]]))
    # Harmonise both axes to a common square range
    if lowy < lowx:
        lowx = lowy
    if highy > highx:
        highx = highy
    plt.ylim(lowx, highx)
    plt.xlim(lowx, highx)
    # Overlay 1:1 line
    plt.plot(np.array([lowx, highx]), np.array([lowx, highx]), ls="--",
             color=[0.5, 0.5, 0.5], zorder=1)
    plt.tight_layout()
    if filename:
        utils._save_image(filename, filetype, resolution)
    if not overlay:
        plt.show()
    return data
def sample_agency_magnitude_pairs(data, xbins, ybins, number_samples=1):
    """
    Builds a 2-D histogram of the magnitude pairs on the given bin edges.

    With a single sample (or when the recorded sigmas are effectively
    zero) a plain histogram of the rounded magnitudes is returned.
    Otherwise the histogram is averaged over Monte Carlo perturbations of
    the magnitudes, drawn as normal deviates scaled by their sigmas.
    """
    keys = list(data.keys())
    x_mags, x_sigmas = data[keys[0]], data[keys[1]]
    y_mags, y_sigmas = data[keys[2]], data[keys[3]]
    if (not number_samples) or (number_samples == 1):
        # Only one sample requested - plain histogram
        return np.histogram2d(np.around(x_mags, 2),
                              np.around(y_mags, 2),
                              bins=[xbins, ybins])[0]
    if (np.max(x_sigmas) < 1E-15) and (np.max(y_sigmas) < 1E-15):
        # No uncertainty on the magnitudes - sampling is pointless
        return np.histogram2d(np.around(x_mags, 2),
                              np.around(y_mags, 2),
                              bins=[xbins, ybins])[0]
    n_data = len(x_mags)
    counter = np.zeros([len(xbins) - 1, len(ybins) - 1])
    for _ in range(number_samples):
        # Perturb both data sets by their respective uncertainties
        sample_x = x_mags + x_sigmas * np.random.normal(0., 1., n_data)
        sample_y = y_mags + y_sigmas * np.random.normal(0., 1., n_data)
        counter += np.histogram2d(sample_x, sample_y,
                                  bins=[xbins, ybins])[0]
    return counter / float(number_samples)
def plot_agency_magnitude_density(data, overlay=False, number_samples=0,
        xlim=[], ylim=[], figure_size=(7, 8), lognorm=True,
        filetype="png", resolution=300, filename=None):
    """
    Creates a density plot of the earthquakes corresponding to an
    agency-magnitude combination

    :param dict data: Query result for a joint agency-magnitude pair
        combination
    :param bool overlay: Leave the figure open for further plotting (True)
        or show it (False)
    :param int number_samples: Number of Monte Carlo samples of the
        magnitude uncertainties used to build the density
    :param list xlim: Lower and upper bounds for x-axis
    :param list ylim: Lower and upper bounds for y-axis
    :param bool lognorm: Use a logarithmic colour scale
    :returns: The input data (None when data is empty)
    """
    # Guard before touching the data - previously data.keys() was
    # evaluated first, raising AttributeError when the query returned None
    if not data:
        print("No pairs found - abandoning plot!")
        return
    keys = list(data.keys())
    if len(xlim) == 2:
        lowx = xlim[0]
        highx = xlim[1]
    else:
        lowx = np.floor(np.min(data[keys[0]]))
        highx = np.ceil(np.max(data[keys[0]]))
    if len(ylim) == 2:
        lowy = ylim[0]
        highy = ylim[1]
    else:
        lowy = np.floor(np.min(data[keys[2]]))
        highy = np.ceil(np.max(data[keys[2]]))
    # Harmonise both axes to a common square range
    if lowy < lowx:
        lowx = lowy
    if highy > highx:
        highx = highy
    # 0.1 magnitude-unit bins centred on the rounded values. np.linspace
    # requires an integer sample count (a float raises TypeError in
    # modern numpy)
    nbins = int(round((highx - lowx) / 0.1)) + 2
    xbins = np.linspace(lowx - 0.05, highx + 0.05, nbins)
    ybins = np.linspace(lowx - 0.05, highx + 0.05, nbins)
    density = sample_agency_magnitude_pairs(data, xbins, ybins,
                                            number_samples)
    fig = plt.figure(figsize=figure_size)
    cmap = deepcopy(matplotlib.cm.get_cmap("jet"))
    if lognorm:
        data_norm = LogNorm(vmin=0.1, vmax=np.max(density))
    else:
        cmap.set_under("w")
        data_norm = Normalize(vmin=0.1, vmax=np.max(density))
    plt.pcolormesh(xbins[:-1] + 0.05, ybins[:-1] + 0.05, density.T,
                   norm=data_norm, cmap=cmap)
    cbar = plt.colorbar()
    cbar.set_label("Number Events", fontsize=16)
    plt.xlabel(utils._to_latex(keys[0]), fontsize=16)
    plt.ylabel(utils._to_latex(keys[2]), fontsize=16)
    plt.grid(True)
    plt.ylim(lowx, highx)
    plt.xlim(lowx, highx)
    # Overlay 1:1 line
    plt.plot(np.array([lowx, highx]), np.array([lowx, highx]), ls="--",
             color=[0.5, 0.5, 0.5], zorder=1)
    plt.tight_layout()
    if filename:
        utils._save_image(filename, filetype, resolution)
    if not overlay:
        plt.show()
    return data
# Strategies for replacing missing magnitude uncertainties, keyed by
# strategy name (NaN values are ignored by the nan-aware reductions)
DEFAULT_SIGMA = {"minimum": np.nanmin,
                 "maximum": np.nanmax,
                 "mean": np.nanmean}
def extract_scale_agency(key):
    """
    Splits a query key into its magnitude scale(s) and agency(ies).

    Handles keys of the form "Mw(XXX)", "Mw(XXX) & Mw(YYY)" and
    "Mw(XXX) & Ms(YYY)". Multiple agencies are joined with "|"; multiple
    scales collapse to a single scale when all are identical, otherwise
    they are joined with "|" as well. Raises ValueError for keys without
    a parenthesised agency.
    """
    # Matches the text inside each pair of parentheses (the agencies)
    inside = re.compile(r'(?<=\()[^)]+(?=\))')
    # Matches the text preceding each parenthesised group (the scales)
    outside = re.compile(r'(.*?)\(.*?\)')
    agencies = inside.findall(key)
    raw_scales = outside.findall(key)
    if len(agencies) == 1:
        # Simple case: a single agency/scale pair
        return raw_scales[0], agencies[0]
    elif len(agencies) > 1:
        # Strip joining characters (" & ") from each scale fragment
        cleaned = [scale.replace("&", "").replace(" ", "")
                   for scale in raw_scales]
        if cleaned.count(cleaned[0]) == len(cleaned):
            # All fragments share the same magnitude scale
            return cleaned[0], "|".join(agencies)
        return "|".join(cleaned), "|".join(agencies)
    raise ValueError("Badly formatted key %s" % key)
class CatalogueRegressor(object):
    """
    Class to perform an orthogonal distance regression on a pair of
    magnitude data tuples

    :param dict data:
        Output of agency-magnitude query
    :param common_catalogue:
        Catalogue of common events as instance of :class: CatalogueDB
    :param list keys:
        List of keys in the data set
    :param model:
        Regression model (eventually as instance of :class: scipy.odr.Model)
    :param regression_data:
        Regression data (eventually as instance of :class: scipy.odr.RealData)
    :param results:
        Regression results as instance of :class: scipy.odr.Output
    :param str model_type:
        Type of model used for regression
    """
    def __init__(self, data, common_catalogue=None):
        """
        Instantiate with data
        """
        self.data = data
        self.common_catalogue = common_catalogue
        self.keys = list(self.data.keys())
        # Retrieve the scale and agency information from keys
        self.x_scale, self.x_agency = extract_scale_agency(self.keys[0])
        self.y_scale, self.y_agency = extract_scale_agency(self.keys[2])
        self.model = None
        self.regression_data = None
        self.results = None
        self.model_type = None
        self.standard_deviation = None

    @classmethod
    def from_catalogue(cls, catalogue, pair1, pair2, no_case=False):
        """
        Class method to instantiate the regression object with the agency-
        magnitude query parameters

        :param catalogue:
            Earthquake catalogue as instance of :class: CatalogueDB
        :param tuple pair1:
            Agency and magnitude combination (Agency, Magnitude Type) for
            defining the independent variable
        :param tuple pair2:
            Agency and magnitude combination (Agency, Magnitude Type) for
            defining the dependent variable
        :param bool no_case:
            Makes the selection case sensitive (True) or ignore case (False)
        """
        data, common_catalogue = \
            get_agency_magnitude_pairs(catalogue, pair1, pair2, no_case)
        if not data:
            raise ValueError("Cannot build regression!")
        return cls(data, common_catalogue)

    @classmethod
    def from_array(cls, data, keys):
        """
        Class method to build the regression object from a simple
        four-column array of data ([mag1, sigma1, mag2, sigma2]) and the
        corresponding keys as a "key1|key2" string
        """
        data_keys = keys.split("|")
        data_dict = OrderedDict([
            (data_keys[0], data[:, 0]),
            (data_keys[0] + " Sigma", data[:, 1]),
            (data_keys[1], data[:, 2]),
            (data_keys[1] + " Sigma", data[:, 3])])
        return cls(data_dict)

    def plot_data(self, overlay, xlim=[], ylim=[], marker="o",
                  figure_size=(7, 7), filetype="png", resolution=300,
                  filename=None):
        """
        Plots the result of the agency-magnitude query
        """
        plot_agency_magnitude_pair(self.data, overlay, xlim, ylim, marker,
                                   figure_size, filetype, resolution,
                                   filename)

    def plot_density(self, overlay, xlim=[], ylim=[], lognorm=True, sample=0,
                     figure_size=(7, 7), filetype="png", resolution=300,
                     filename=None):
        """
        Plots the result of the agency-magnitude query as a density plot
        """
        plot_agency_magnitude_density(self.data, overlay, sample, xlim, ylim,
                                      figure_size, lognorm, filetype,
                                      resolution, filename)

    def run_regression(self, model_type, initial_params, setup_parameters={}):
        r"""
        Runs the regression analysis on the retrieved data

        :param str model_type:
            Model type. Choose from {"polynomial", "piecewise", "exponential",
            "2segmentM#.#"} where M#.# is the corner magnitude
        :param list initial_params:
            Initial estimate of the parameters
            * polynomial = [c_1, c_2, c_3, ...] where
              f(X) = \Sum_i^N c_i X^{i-1}
            * piecewise = [m_1, m_2, ..., m_i, xc_1, xc_2, ..., xc_i-1, c]
            * exponential = [c_1, c_2, c_3] where
              f(X) = exp(c_1 + c_2 X) + c_3
            * 2segmentM#.# = [m_1, m_2, c_1] where m_1 and m_2 are the
              gradient of slope 1 and 2, respectively, and c_1 is the
              intercept
        :param dict setup_parameters:
            Optional parameters to control how to define missing
            uncertainties ("Missing X"/"Missing Y" strategy names and
            "sx"/"sy" default sigma values)
        """
        if "2segment" in model_type:
            # e.g. "2segmentM5.5" -> two-segment model with corner at M 5.5
            model_type, mag = model_type.split("M")
            mag = float(mag)
            self.model_type = function_map[model_type](mag)
        else:
            if not model_type in function_map:
                raise ValueError("Model type %s not supported!" % model_type)
            self.model_type = function_map[model_type]()
        self.model = odr.Model(self.model_type.run)
        if (model_type == "exponential") and (len(initial_params) != 3):
            raise ValueError("Exponential model requires three initial "
                             "parameters")
        setup_parameters.setdefault("Missing X", "Default")
        setup_parameters.setdefault("Missing Y", "Default")
        setup_parameters.setdefault("sx", 0.1)
        setup_parameters.setdefault("sy", 0.1)
        # Setup X: replace missing/zero sigmas by the configured strategy
        s_x = self.data[self.keys[1]]
        idx = (np.isnan(s_x)) | (s_x < 1E-20)
        if np.any(idx):
            # Need to apply default sigma values
            if (setup_parameters["Missing X"] == "Default") or np.all(idx):
                s_x[idx] = setup_parameters["sx"]
            else:
                s_x[idx] = DEFAULT_SIGMA[setup_parameters["Missing X"]](s_x)
        # Setup Y
        s_y = self.data[self.keys[3]]
        idx = (np.isnan(s_y)) | (s_y < 1E-20)
        if np.any(idx):
            # Need to apply default sigma values
            if (setup_parameters["Missing Y"] == "Default") or np.all(idx):
                s_y[idx] = setup_parameters["sy"]
            else:
                s_y[idx] = DEFAULT_SIGMA[setup_parameters["Missing Y"]](s_y)
        self.regression_data = odr.RealData(self.data[self.keys[0]],
                                            self.data[self.keys[2]],
                                            sx=s_x,
                                            sy=s_y)
        regressor = odr.ODR(self.regression_data,
                            self.model,
                            initial_params)
        regressor.set_iprint(final=2)
        self.results = regressor.run()
        return self.results

    def plot_model(self, overlay, xlim=[], ylim=[], marker="o",
                   line_color="g", figure_size=(7, 8), filetype="png",
                   resolution=300, filename=None):
        """
        Plots the resulting regression model of the data
        """
        # Plot data
        plot_agency_magnitude_pair(self.data, True,
                                   xlim, ylim,
                                   marker, figure_size)
        # Plot Model
        model_x, model_y, self.standard_deviation = self.retrieve_model()
        title_string = self.model_type.get_string(self.keys[2], self.keys[0])
        plt.plot(model_x, model_y, line_color,
                 linewidth=2.0,
                 label=title_string)
        plt.legend(loc=2, frameon=False)
        if filename:
            utils._save_image(filename, filetype, resolution)
        if not overlay:
            plt.show()

    def plot_model_density(self, overlay, sample, xlim=[], ylim=[],
                           line_color="g", figure_size=(7, 8), lognorm=True,
                           filetype="png", resolution=300, filename=None):
        """
        Plots the resulting regression model over a density plot of the
        data
        """
        # Plot data
        plot_agency_magnitude_density(self.data, True, sample, xlim, ylim,
                                      figure_size, lognorm)
        # Plot Model
        model_x, model_y, self.standard_deviation = self.retrieve_model()
        title_string = self.model_type.get_string(self.keys[2], self.keys[0])
        plt.plot(model_x, model_y, line_color,
                 linewidth=2.0,
                 label=title_string)
        plt.legend(loc=2, frameon=False)
        if filename:
            utils._save_image(filename, filetype, resolution)
        if not overlay:
            plt.show()

    def plot_magnitude_conversion_model(self, model, overlay, line_color="g",
                                        filetype="png", resolution=300,
                                        filename=None):
        """
        Plots a specific magnitude conversion model (to overlay on top of
        a current figure)
        """
        model_x = np.arange(0.9 * np.min(self.data[self.keys[0]]),
                            1.1 * np.max(self.data[self.keys[0]]),
                            0.01)
        model_y, _ = model.convert_value(model_x, 0.0)
        plt.plot(model_x, model_y, line_color,
                 linewidth=2.0,
                 label=model.model_name)
        plt.legend(loc=2, frameon=False)
        if filename:
            utils._save_image(filename, filetype, resolution)
        if not overlay:
            plt.show()

    def retrieve_model(self):
        """
        Returns a set of x- and y-values for the given model, together
        with the model's standard deviation(s)
        """
        model_x = np.arange(0.9 * np.min(self.data[self.keys[0]]),
                            1.1 * np.max(self.data[self.keys[0]]),
                            0.01)
        model_y = self.model_type.run(self.results.beta, model_x)
        standard_deviation = self.get_standard_deviation()
        return model_x, model_y, standard_deviation

    def get_standard_deviation(self, default=True):
        """
        Returns the "default" standard deviations for each function. In the
        case of the piecewise functions a different standard deviation is
        given for each segment for the default setting. Otherwise a single
        total standard deviation is defined for the whole function
        """
        if default and isinstance(self.model_type, function_map["2segment"]):
            # One standard deviation per segment, split at the corner
            # magnitude
            idx = self.data[self.keys[0]] < self.model_type.corner_magnitude
            data_xl = self.data[self.keys[0]][idx]
            data_yl = self.data[self.keys[2]][idx]
            sigma_l = np.std(data_yl -
                             self.model_type.run(self.results.beta, data_xl))
            idx = self.data[self.keys[0]] >= self.model_type.corner_magnitude
            data_xu = self.data[self.keys[0]][idx]
            data_yu = self.data[self.keys[2]][idx]
            sigma_u = np.std(data_yu -
                             self.model_type.run(self.results.beta, data_xu))
            standard_deviation = [sigma_l, sigma_u]
        elif default and isinstance(self.model_type,
                                    function_map["piecewise"]):
            standard_deviation = []
            npar = len(self.results.beta)
            # Parameter layout is [m_1, ..., m_k, xc_1, ..., xc_{k-1}, c],
            # so the corner magnitudes occupy the slice below. Fixes three
            # Python 3 bugs in the previous version: a float slice bound
            # (npar / 2), extend() called with a scalar, and unpacking
            # range() as (index, value) pairs.
            corner_magnitudes = [-np.inf]
            corner_magnitudes.extend(
                self.results.beta[(npar // 2):(npar - 1)])
            corner_magnitudes.append(np.inf)
            for iloc, m_c in enumerate(corner_magnitudes[:-1]):
                idx = np.logical_and(
                    self.data[self.keys[0]] >= m_c,
                    self.data[self.keys[0]] < corner_magnitudes[iloc + 1])
                data_x = self.data[self.keys[0]][idx]
                data_y = self.data[self.keys[2]][idx]
                standard_deviation.append(
                    np.std(data_y - self.model_type.run(self.results.beta,
                                                        data_x)))
        else:
            # Single total standard deviation over the whole function
            standard_deviation = np.std(
                self.data[self.keys[2]] -
                self.model_type.run(self.results.beta, self.data[self.keys[0]])
            )
        return standard_deviation

    def get_magnitude_conversion_model(self):
        """
        Returns the regression model as an instance of :class:
        eqcat.isc_homogenisor.MagnitudeConversionRule
        """
        standard_deviation = self.get_standard_deviation()
        return self.model_type.to_conversion_rule(self.x_agency, self.x_scale,
                                                  self.results.beta,
                                                  standard_deviation)

    def get_catalogue_residuals(self, catalogue=None):
        """
        Returns a list of normalised residuals and their corresponding
        events
        """
        if not catalogue:
            catalogue = self.common_catalogue
        rule = self.get_magnitude_conversion_model()
        # Group magnitudes and origins by event ID
        mag_grps = catalogue.magnitudes.groupby("eventID")
        orig_grps = catalogue.origins.groupby("eventID")
        output = []
        for event_id, event in mag_grps:
            input_x, observed_y, input_x_origin, observed_y_origin,\
                input_x_row, observed_y_row, event_datetime =\
                self._extract_event_data(event,
                                         orig_grps.get_group(event_id))
            if input_x and observed_y:
                residual, expected_y, sigma = rule.get_residual(input_x,
                                                                observed_y)
                event_data = {
                    "residual": residual,
                    "x_mag": input_x,
                    "y_obs": observed_y,
                    "y_model": expected_y,
                    "stddev": sigma,
                    "x_mag_data": input_x_row,
                    "x_orig_data": input_x_origin,
                    "y_mag_data": observed_y_row,
                    "y_orig_data": observed_y_origin,
                    "datetime": event_datetime}
                output.append(event_data)
        return output

    def _extract_event_data(self, event, orig_grp):
        """
        Residual plots with time need to assign a single date/time to the
        event. There can be cases, however, where the selected magnitude
        is associated to an origin not present in the origins (due to
        agency filtering). This selects (by preference) the y-origin and if
        not available then the x-origin
        """
        input_x = None
        observed_y = None
        input_x_origin = None
        observed_y_origin = None
        input_x_row = None
        observed_y_row = None
        orig_grps = orig_grp.groupby("originID")
        for _, row in event.iterrows():
            if row.magAgency == self.y_agency and\
                    row.magType.lower() == self.y_scale.lower():
                observed_y = row.value
                observed_y_row = deepcopy(row)
                if row.originID in orig_grps.groups:
                    observed_y_origin = orig_grps.get_group(row.originID)
            if row.magAgency == self.x_agency and\
                    row.magType.lower() == self.x_scale.lower():
                input_x = row.value
                input_x_row = deepcopy(row)
                if row.originID in orig_grps.groups:
                    input_x_origin = orig_grps.get_group(row.originID)
        # NOTE(review): the *_origin objects are single-row DataFrames, so
        # attribute access below yields one-element Series; int()/datetime()
        # rely on single-element coercion - confirm behaviour under newer
        # pandas versions
        if observed_y_origin is not None:
            event_sec = observed_y_origin.second
            event_microsec = int((event_sec - np.floor(event_sec)) * 1E6)
            event_sec = int(event_sec)
            event_datetime = datetime(observed_y_origin.year,
                                      observed_y_origin.month,
                                      observed_y_origin.day,
                                      observed_y_origin.hour,
                                      observed_y_origin.minute,
                                      event_sec, event_microsec)
        elif input_x_origin is not None:
            event_sec = input_x_origin.second
            event_microsec = int((event_sec - np.floor(event_sec)) * 1E6)
            event_sec = int(event_sec)
            event_datetime = datetime(input_x_origin.year,
                                      input_x_origin.month,
                                      input_x_origin.day,
                                      input_x_origin.hour,
                                      input_x_origin.minute,
                                      event_sec, event_microsec)
        else:
            row = orig_grp.iloc[0]
            # Take from the last location
            event_sec = row.second
            event_microsec = int((event_sec - np.floor(event_sec)) * 1E6)
            event_sec = int(event_sec)
            event_datetime = datetime(row.year,
                                      row.month,
                                      row.day,
                                      row.hour,
                                      row.minute,
                                      event_sec, event_microsec)
        return input_x, observed_y, input_x_origin, observed_y_origin,\
            input_x_row, observed_y_row, event_datetime

    def plot_residuals_magnitude(self, residuals=None, catalogue=None,
                                 normalised=True, xlim=[], ylim=None,
                                 figure_size=(8, 8), filename=None,
                                 filetype="png", dpi=300):
        """
        Plots the residuals with respect to magnitude
        """
        if not residuals:
            residuals = self.get_catalogue_residuals(catalogue)
        yvals = []
        xvals = []
        for residual in residuals:
            if normalised:
                yvals.append(residual["residual"])
            else:
                yvals.append(residual["y_obs"] - residual["y_model"])
            xvals.append(residual["x_mag"])
        fig = plt.figure(figsize=figure_size)
        ax = fig.add_subplot(111)
        ax.scatter(xvals, yvals, s=40, c="b", marker="o", edgecolors="w")
        if len(xlim) == 2:
            lb, ub = xlim
        else:
            lb = np.floor(np.min(xvals))
            ub = np.ceil(np.max(xvals))
        ax.set_xlim(lb, ub)
        if not ylim:
            ylim = np.ceil(np.max(np.abs(yvals)))
        ax.set_ylim(-ylim, ylim)
        ax.grid(True)
        ax.set_xlabel("%s (%s)" % (self.x_scale, self.x_agency), fontsize=18)
        if normalised:
            ax.set_ylabel(r"$\varepsilon$", fontsize=18)
        else:
            ax.set_ylabel("%s (%s) - %s (%s) " % (self.y_scale, self.y_agency,
                                                  self.x_scale,
                                                  self.x_agency),
                          fontsize=18)
        if filename:
            plt.savefig(filename, format=filetype, dpi=dpi,
                        bbox_inches="tight")

    def plot_residuals_time(self, residuals=None, catalogue=None,
                            normalised=True, ylim=None, figure_size=(9, 6),
                            filename=None, filetype="png", dpi=300):
        """
        Produces a plot of the residuals with respect to time, color scaled
        by magnitude
        """
        if not residuals:
            residuals = self.get_catalogue_residuals(catalogue)
        yvals = []
        xvals = []
        xmags = []
        for residual in residuals:
            if normalised:
                yvals.append(residual["residual"])
            else:
                yvals.append(residual["y_obs"] - residual["y_model"])
            xvals.append(residual["datetime"])
            xmags.append(residual["x_mag"])
        fig = plt.figure(figsize=figure_size)
        ax = fig.add_subplot(111)
        cb = ax.scatter(xvals, yvals, s=40, marker="o", c=xmags,
                        edgecolors="w", cmap=plt.cm.get_cmap("plasma"))
        fig.colorbar(cb)
        ax.grid(True)
        ax.fmt_xdata = mdates.DateFormatter("%Y")
        if not ylim:
            ylim = np.ceil(np.max(np.abs(yvals)))
        ax.set_ylim(-ylim, ylim)
        ax.set_xlabel("Date", fontsize=18)
        if normalised:
            ax.set_ylabel(r"$\varepsilon$", fontsize=18)
        else:
            ax.set_ylabel("%s (%s) - %s (%s) " % (self.y_scale, self.y_agency,
                                                  self.x_scale,
                                                  self.x_agency),
                          fontsize=18)
        for tick in ax.get_xticklabels():
            tick.set_rotation(45)
        if filename:
            plt.savefig(filename, format=filetype, dpi=dpi,
                        bbox_inches="tight")

    def plot_model_residuals(self, residuals=None, catalogue=None,
                             normalised=True, lims=[], ylim=None,
                             figure_size=(7, 8), filename=None,
                             filetype="png", dpi=300):
        """
        Produces a full breakdown of model and residuals
        """
        if not residuals:
            residuals = self.get_catalogue_residuals(catalogue)
        yvals = []
        xvals = []
        xmags = []
        ymags = []
        for residual in residuals:
            if normalised:
                yvals.append(residual["residual"])
            else:
                yvals.append(residual["y_obs"] - residual["y_model"])
            xvals.append(residual["datetime"])
            xmags.append(residual["x_mag"])
            ymags.append(residual["y_obs"])
        # Plot the main model
        fig = plt.figure(figsize=(12, 7))
        gs = gridspec.GridSpec(2, 2)
        ax1 = fig.add_subplot(gs[:, 0])
        ax1.plot(xmags, ymags, "bo", markeredgecolor="w")
        if lims:
            lb, ub = lims
        else:
            # Common limits spanning both magnitude axes (previously xmags
            # appeared in both min() terms and ymags in both max() terms)
            lb = min(np.floor(np.min(xmags)),
                     np.floor(np.min(ymags)))
            ub = max(np.ceil(np.max(xmags)),
                     np.ceil(np.max(ymags)))
        ax1.plot([lb, ub], [lb, ub], "--", color=[0.5, 0.5, 0.5])
        ax1.set_xlim(lb, ub)
        ax1.set_ylim(lb, ub)
        model_x = np.arange(lb, ub + 0.01, 0.01)
        rule = self.get_magnitude_conversion_model()
        model_y = np.array([rule.convert_value(x, 0.0)[0] for x in model_x])
        ax1.plot(model_x, model_y, "r-", lw=2.)
        ax1.set_xlabel(r"%s (%s)" % (self.x_scale, self.x_agency),
                       fontsize=18)
        ax1.set_ylabel(r"%s (%s)" % (self.y_scale, self.y_agency),
                       fontsize=18)
        ax1.set_title(self.model_type.get_string(self.keys[2], self.keys[0]),
                      fontsize=16)
        ax1.grid(True)
        # Plot the residuals with time
        ax2 = fig.add_subplot(gs[1, 1])
        cb = ax2.scatter(xvals, yvals, s=40, c=ymags, edgecolors="w",
                         cmap=plt.cm.get_cmap("plasma"))
        fig.colorbar(cb)
        ax2.grid(True)
        ax2.fmt_xdata = mdates.DateFormatter("%Y")
        if not ylim:
            iylim = np.ceil(np.max(np.abs(yvals)))
            ax2.set_ylim(-iylim, iylim)
        else:
            ax2.set_ylim(-ylim, ylim)
        ax2.set_xlabel("Date", fontsize=18)
        if normalised:
            ax2.set_ylabel(r"$\varepsilon$", fontsize=18)
        else:
            ax2.set_ylabel(
                "%s (%s) - %s (%s) " % (self.y_scale, self.y_agency,
                                        self.x_scale, self.x_agency),
                fontsize=18)
        for tick in ax2.get_xticklabels():
            tick.set_rotation(45)
        # Plot residuals with magnitude
        ax3 = fig.add_subplot(gs[0, 1])
        ax3.scatter(xmags, yvals, s=40, c="b", edgecolors="w")
        ax3.set_xlim(lb, ub)
        ax3.grid(True)
        if not ylim:
            iylim = np.ceil(np.max(np.abs(yvals)))
            ax3.set_ylim(-iylim, iylim)
        else:
            ax3.set_ylim(-ylim, ylim)
        if normalised:
            ax3.set_ylabel(r"$\varepsilon$", fontsize=18)
        else:
            ax3.set_ylabel(
                "%s (%s) - %s (%s) " % (self.y_scale, self.y_agency,
                                        self.x_scale, self.x_agency),
                fontsize=18)
        # Cleanup and save file if needed
        plt.tight_layout()
        if filename:
            plt.savefig(filename, format=filetype, dpi=dpi,
                        bbox_inches="tight")
def plot_catalogue_map(config, catalogue, magnitude_scale=False,
        color_norm=None, overlay=False, figure_size=(7,8), filename=None,
        filetype="png", dpi=300):
    """
    Creates a map of the catalogue epicentres, coloured by depth and
    (optionally) sized by magnitude.

    :param dict config: map bounds and grid spacing; must contain the keys
        "llon", "ulon", "llat", "ulat", "resolution", "parallel", "meridian"
    :param catalogue: catalogue whose ``origins`` dataframe (and, when a
        magnitude scale is given, ``magnitudes`` dataframe) is plotted
    :param magnitude_scale: magnitude type used to size the symbols
        (e.g. "Mw"), or False for a fixed symbol size
    :param color_norm: optional matplotlib colour normalisation for depth
    :param bool overlay: if True, suppress ``plt.show()`` so further layers
        can be drawn on the same figure
    :param tuple figure_size: figure size in inches
    :param filename: if given, save the figure to this path
    :param str filetype: image format passed to ``plt.savefig``
    :param int dpi: output resolution for the saved figure
    """
    plt.figure(figsize=figure_size)
    # Centre the stereographic projection on the middle of the bounding box
    lat0 = config["llat"] + ((config["ulat"] - config["llat"]) / 2)
    lon0 = config["llon"] + ((config["ulon"] - config["llon"]) / 2)
    map1 = Basemap(llcrnrlon=config["llon"], llcrnrlat=config["llat"],
                   urcrnrlon=config["ulon"], urcrnrlat=config["ulat"],
                   projection='stere', resolution=config['resolution'],
                   area_thresh=1000.0, lat_0=lat0, lon_0=lon0)
    map1.drawcountries()
    map1.drawmapboundary()
    map1.drawcoastlines()
    map1.drawstates()
    # Graticule spacing comes from the config; the + step makes the upper
    # bound inclusive for np.arange
    parallels = np.arange(config["llat"],
                          config["ulat"] + config["parallel"],
                          config["parallel"])
    meridians = np.arange(config["llon"],
                          config["ulon"] + config["meridian"],
                          config["meridian"])
    map1.drawparallels(parallels, color=[0.5, 0.5, 0.5],
                       labels=[1, 0, 0, 0], fontsize=12)
    map1.drawmeridians(meridians, color=[0.5, 0.5, 0.5],
                       labels=[0, 0, 0, 1], fontsize=12)
    map1.drawmapboundary(fill_color='#C2DFFF')
    map1.fillcontinents(color='wheat', lake_color="#C2DFFF")
    # Project origin coordinates into map space
    lon, lat = map1(catalogue.origins["longitude"].values,
                    catalogue.origins["latitude"].values)
    if magnitude_scale:
        magnitudes = []
        mag_grps = catalogue.magnitudes.groupby("originID")
        for key in catalogue.origins.originID.values:
            if key in catalogue.magnitudes.originID.values:
                grp = mag_grps.get_group(key)
                if magnitude_scale in grp.magType.values:
                    magnitudes.append(
                        grp[grp.magType==magnitude_scale].value.values[0])
                else:
                    # Origin has magnitudes, but none of the requested type
                    magnitudes.append(1.0)
        # NOTE(review): origins with no entry in the magnitudes table get
        # no value appended at all, so `magnitudes` can be shorter than
        # lon/lat -- confirm scatter inputs always align.
        #print magnitudes
        # Square so symbol *area* grows with magnitude
        magnitudes = np.array(magnitudes) ** 2.0
    else:
        magnitudes = 10.0
    map1.scatter(lon, lat,
                 marker="o",
                 s=magnitudes,
                 c=catalogue.origins["depth"].values,
                 norm=color_norm,
                 alpha=1.0,
                 linewidths=0.1,
                 edgecolor="w",
                 zorder=5)
    cbar = map1.colorbar()
    cbar.set_label("Depth", fontsize=14)
    if filename:
        plt.savefig(filename, format=filetype, dpi=dpi)
    if not overlay:
        plt.show()
| g-weatherill/catalogue_toolkit | eqcat/catalogue_query_tools.py | Python | agpl-3.0 | 60,867 | 0.001824 |
import socket
from subprocess import Popen, PIPE, STDOUT
import os
import time
import string
import requests
import json
import omxplayer
class UnsupportedFileTypeException(Exception):
    '''Raised when a file's extension is not among the supported types.'''
    pass
class FileNotFoundException(Exception):
    '''Raised when the given path does not point to an existing file.'''
    pass
class OmxCommsError(Exception):
    '''Raised when a command sent to the player fails to execute.'''
    pass
class Omx(object):
    """Playlist manager wrapping omxplayer's OMXPlayer.

    Keeps a local list of validated media file paths and exposes simple
    transport controls (play/pause/stop) plus a feeder loop that advances
    through the playlist.
    """

    def __init__(self):
        # Local playlist of file paths; mirrors what the player will see.
        self._playlist = []
        # Lazily created omxplayer.OMXPlayer instance (see create_player).
        self._player = None
        # Supported file extensions, lower-case without the leading dot.
        # (The original list contained ".aac" with a dot, which could
        # never match an extracted extension.)
        self.supported = ["mp4", "avi", "mkv",
                          "flv", "aac", "3gp"]  # add more later

    @property
    def playlist(self):
        '''Returns the list of queued file paths.'''
        return self._playlist

    @property
    def connection_open(self):
        """True when a player exists and is currently playing.

        Guards against the player not having been created yet (the
        original raised AttributeError on a None player).
        """
        return self._player is not None and self._player.is_playing()

    @playlist.setter
    def playlist(self, arg):
        """Add one path (str) or several (list/tuple) to the playlist.

        Every path is validated via check_path first; duplicates are
        silently skipped. Files can only be added this way -- use
        ``del obj.playlist`` or ``clear()`` to empty the queue.
        """
        if isinstance(arg, (list, tuple)):
            paths = arg
        elif isinstance(arg, str):
            paths = [arg]
        else:
            # Previously invalid types were silently ignored; fail loudly.
            raise TypeError("playlist accepts a str or a list/tuple of paths")
        for path in paths:
            self.check_path(path)
            if path not in self._playlist:
                self._enqueue(path)

    @playlist.deleter
    def playlist(self):
        '''Clears the local playlist.'''
        # Must not call clear() here: the original deleter did, and
        # clear() deletes the property again -> infinite recursion.
        self._playlist = []

    def create_player(self):
        """Instantiate the underlying OMXPlayer with the first queued file.

        :raises Exception: if the playlist is empty.
        """
        if self.playlist == []:
            raise Exception("The video player has no files to add")
        self._player = omxplayer.OMXPlayer(self._playlist[0])

    def check_path(self, path):
        '''Validate that *path* is an existing file of a supported type.

        :raises FileNotFoundException: if the path is not a regular file.
        :raises UnsupportedFileTypeException: if the extension is unknown.
        '''
        if not os.path.isfile(path):
            raise FileNotFoundException()
        # splitext tolerates dots inside the name ("my.movie.mp4"), which
        # the previous name.split(".") unpacking crashed on.
        ext = os.path.splitext(path)[1].lstrip(".").lower()
        if ext not in self.supported:
            raise UnsupportedFileTypeException()

    def toggle_fullscreen(self):
        '''For compatibility'''
        return True

    def toggle_loop(self):
        '''for compatibility'''
        return True

    def pause(self):
        """Pause playback if a player has been created."""
        if self._player:
            self._player.pause()

    def play(self):
        """Start/resume playback if a player has been created."""
        if self._player:
            self._player.play()

    def stop(self):
        """Stop playback if a player has been created."""
        if self._player:
            self._player.stop()

    def _enqueue(self, path):
        '''Append *path* to the local playlist.

        The original assigned ``self.playlist = path`` here, which
        re-entered the property setter and recursed forever without ever
        storing the path.
        '''
        self._playlist.append(path)

    def clear(self):
        '''Removes every file from the playlist.'''
        del self.playlist

    def playlist_loop(self):
        """Keep the player fed.

        Polls the remaining time of the current file and, when it is about
        to finish, loads the next playlist entry, wrapping to the first
        one after the last.
        """
        if not self._player:
            self.create_player()
        while True:
            time.sleep(0.5)
            remaining = self._player.duration() - self._player.position()
            if remaining < 1:
                current = self._playlist.index(self._player.get_source())
                # Modulo wrap fixes the old off-by-one (len - 2) that
                # skipped the final playlist entry and never played it.
                nxt = self._playlist[(current + 1) % len(self._playlist)]
                self._player.load(nxt)
| nakamura9/deploy_ad_server | client/omxplayer/myomx.py | Python | mit | 4,075 | 0.004663 |
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, PageBreak
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import cm
import operator
import os
import ConfigParser
import string
# Read the abook address book from the user's home directory; the
# [format] section is file metadata, not a contact, so drop it up front.
config = ConfigParser.ConfigParser()
config.read(os.environ["HOME"] + "/.abook/addressbook")
config.remove_section('format')
PAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
# Current surname initial; go() starts a new page when it changes.
buchstabe = "A"
Title = "Hello world"
pageinfo = "platypus example"
def Pages(canvas, doc):
    # Page-decoration callback passed to SimpleDocTemplate.build();
    # draws nothing but still balances the canvas save/restore pair.
    canvas.saveState()
    canvas.restoreState()
def go(buchstabe):
    # Python 2 script (print statements, ConfigParser): renders the abook
    # address book as a table-per-initial PDF, breaking to a new page
    # whenever the surname's first letter changes.
    doc = SimpleDocTemplate("phello.pdf")
    Story = []
    style = styles["Normal"]
    addresses=[]
    for s in config.sections():
        # nb = name/address block, ub = phone/e-mail block, mb = custom block
        nb=""
        ub=""
        mb=""
        if config.has_option(s,'name'):
            nb = "<b>" + config.get(s,'name') + "</b><br/>"
            worte=config.get(s,'name').split()
            print len(worte)
            # Surname is the second word when present, else the only word.
            if len(worte)<2:
                nachname=worte[0]
            else:
                nachname=worte[1]
            anfangsbuchstabe=nachname[0:1]
            if anfangsbuchstabe!=buchstabe:
                # Initial changed: flush the collected addresses as a table
                # and start a fresh page for the new letter.
                buchstabe=anfangsbuchstabe
                print buchstabe
                p = Table(addresses)
                p.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),"TOP"),
                    ('ALIGN',(0,-1),(0,-1),'RIGHT')]))
                Story.append(p)
                Story.append(PageBreak())
                addresses=[]
        if config.has_option(s,'address'):
            nb = nb + config.get(s,'address') + "<br/>"
        if config.has_option(s,'zip'):
            nb = nb + config.get(s,'zip') + " "
        if config.has_option(s,'city'):
            nb = nb + config.get(s,'city') + "<br/>"
        if config.has_option(s,'state'):
            nb = nb + config.get(s,'state') + " - "
        if config.has_option(s,'country'):
            nb = nb + config.get(s,'country') + "<br/>"
        nb = nb +"<br/>"
        if config.has_option(s,'phone'):
            ub= "Fon: " + config.get(s,'phone') + "<br/>"
        if config.has_option(s,'mobile'):
            ub= ub + "Mobi: " + config.get(s,'mobile') + "<br/>"
        if config.has_option(s,'email'):
            # abook stores multiple addresses comma-separated
            ub= ub + config.get(s,'email').replace(',','<br/>') + "<br/>"
        ub=ub+"<br/>"
        if config.has_option(s,'custom3'):
            mb= config.get(s,'custom3') + "<br/>"
        mb=mb+"<br/>"
        nameblock = Paragraph(nb,style)
        numberblock = Paragraph(ub,style)
        middleblock = Paragraph(mb,style)
        addresses.append([nameblock,middleblock,numberblock])
    # Flush the final (still unflushed) group of addresses.
    p = Table(addresses)
    p.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),"TOP"),
        ('ALIGN',(0,-1),(0,-1),'RIGHT')]))
    Story.append(p)
    doc.build(Story, onFirstPage=Pages, onLaterPages=Pages)
go(buchstabe)
| andydrop/x17papertrail | abook2pdf.py | Python | gpl-3.0 | 3,106 | 0.027688 |
import sqlite3
class Database:
    """SQLite-backed store for timestamped message records."""

    def __init__(self, dbfile, page_rows=100):
        """Open (or create) *dbfile* and ensure the schema exists.

        :param dbfile: path of the sqlite database file
        :param page_rows: page size used by :meth:`messages`
        """
        self.dbfile = dbfile
        self.page_rows = page_rows
        self.conn = sqlite3.connect(self.dbfile)
        # Row factory lets callers receive dict-convertible rows.
        self.conn.row_factory = sqlite3.Row
        cur = self.conn.cursor()
        cur.execute(
            "CREATE TABLE IF NOT EXISTS messages "
            "(timestamp TEXT, message TEXT);"
        )
        cur.execute(
            "CREATE INDEX IF NOT EXISTS messages_timestamp_idx "
            "ON messages (timestamp);"
        )
        self.conn.commit()

    def __del__(self):
        # Release the connection when the wrapper is garbage collected.
        conn = self.conn
        if conn:
            conn.close()
            self.conn = None

    def count(self):
        """Return the total number of stored messages."""
        row = self.conn.cursor().execute(
            "SELECT COUNT(*) FROM messages"
        ).fetchone()
        return row[0]

    def messages(self, offset=0):
        """Return up to ``page_rows`` messages, newest first, as dicts.

        :param offset: number of rows to skip (for paging)
        """
        cur = self.conn.cursor()
        fetched = cur.execute(
            "SELECT * FROM messages "
            "ORDER BY timestamp DESC "
            "LIMIT ? "
            "OFFSET ?",
            [self.page_rows, offset]
        ).fetchall()
        return list(map(dict, fetched))

    def save(self, item):
        """Persist *item* when it is a message; return True if stored."""
        if item.item_type != 'message':
            return False
        cur = self.conn.cursor()
        cur.execute(
            "INSERT INTO messages VALUES (?,?)",
            [item.content['timestamp'], item.asJson()]
        )
        self.conn.commit()
        return True
| ant9000/websup | cli/db.py | Python | gpl-3.0 | 1,581 | 0.003163 |
from flask import Flask
from os.path import expanduser
def create_app():
    """Build the Flask application: load the user's config file and
    register the frontend blueprint."""
    application = Flask(__name__)
    application.config.from_pyfile(expanduser('~/.directory-tools.py'))
    # Imported here to avoid a circular import at module load time.
    from directory_tools.frontend import frontend
    application.register_blueprint(frontend)
    return application
| FunTimeCoding/directory-tools | directory_tools/application.py | Python | mit | 269 | 0 |
import array
class vec(object):
    # Fixed-size numeric vector backed by array.array. The arithmetic is
    # written as explicit index loops rather than comprehensions --
    # presumably deliberate so a tracing JIT can vectorise them (this file
    # appears to be SIMD benchmark code); TODO confirm before restyling.
    @staticmethod
    def sized(size, type='d'):
        # Zero-filled vector of the given length and array typecode.
        return vec([0] * size, type)
    @staticmethod
    def of(content, type='d'):
        # Vector initialised from an existing sequence.
        return vec(content, type)
    def __init__(self, content, type='d'):
        # `type` is an array.array typecode ('d' = C double).
        self.size = len(content)
        self.type = type
        self.array = array.array(type, content)
    def __add__(self, other):
        return self.add(other)
    def add(self, other, out=None):
        # Element-wise sum. Writes into `out` when provided, otherwise
        # allocates a fresh result vector of the same size/type.
        assert isinstance(other, vec)
        result = out
        if result is None:
            result = vec([0] * self.size, self.type)
        if self.size != other.size:
            raise Exception("size mismatch! %d != %d" % (self.size,other.size))
        i = 0
        while i < self.size:
            result.array[i] = self.array[i] + other.array[i]
            i += 1
        return result
    def __sub__(self, other):
        return self.sub(other)
    def sub(self, other, out=None):
        # Element-wise difference (self - other); see add() for the
        # out/allocation convention.
        assert isinstance(other, vec)
        result = out
        if result is None:
            result = vec([0] * self.size, self.type)
        if self.size != other.size:
            raise Exception("size mismatch! %d != %d" % (self.size,other.size))
        i = 0
        while i < self.size:
            result.array[i] = self.array[i] - other.array[i]
            i += 1
        return result
    def __mul__(self, other):
        return self.mul(other)
    def mul(self, other, out=None):
        # Element-wise (Hadamard) product; see add() for the
        # out/allocation convention.
        assert isinstance(other, vec)
        result = out
        if result is None:
            result = vec([0] * self.size, self.type)
        if self.size != other.size:
            raise Exception("size mismatch! %d != %d" % (self.size,other.size))
        i = 0
        while i < self.size:
            result.array[i] = self.array[i] * other.array[i]
            i += 1
        return result
| planrich/pypy-simd-benchmark | vec.py | Python | gpl-3.0 | 1,851 | 0.002161 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Server Specific Configurations
server = {
    'port': '9859',
    # 0.0.0.0 binds every network interface.
    'host': '0.0.0.0',
}
# Pecan Application Configurations
app = {
    'root': 'payload.api.controllers.root.RootController',
    'modules': ['payload.api'],
    # %(confdir)s is interpolated by Pecan with the config file directory.
    'static_root': '%(confdir)s/public',
    'template_path': '%(confdir)s/payload/api/templates',
}
| gorocacher/payload | payload/api/config.py | Python | apache-2.0 | 954 | 0 |
#!/usr/bin/env python
"""
$ python cmdln_main2.py
This is my shell.
$ python cmdln_main2.py foo
hello from foo
"""
import sys
import cmdln
class Shell(cmdln.RawCmdln):
    # NOTE: cmdln prints this class docstring as the shell's help/banner
    # text (see the module docstring), so it is user-visible output.
    "This is my shell."
    name = "shell"
    def do_foo(self, argv):
        # `do_*` methods are dispatched as subcommands; argv is the raw
        # argument vector for the invoked command.
        print("hello from foo")
if __name__ == "__main__":
    # Dispatch a single subcommand (e.g. "foo") rather than starting an
    # interactive loop; exit with the command's return code.
    shell = Shell()
    retval = shell.cmd(sys.argv[1:]) # just run one command
    sys.exit(retval)
| hfeeki/cmdln | test/cmdln_main2.py | Python | mit | 421 | 0.011876 |
# coding: utf-8
from libs.redis_storage import db1
class User(object):
    # Redis-backed user record (Python 2 source: uses dict.iteritems()).
    # All attribute reads/writes go through __getattr__/__setattr__ and
    # are backed by the `user::<pk>` hash in db1 (write-through cache in
    # self.__info__).
    def __init__(self, **kwargs):
        # Reuse the supplied pk or allocate a fresh one from the counter.
        pk = kwargs.get('pk') or db1.incr('new_user_id')
        kwargs['pk'] = pk
        db1.hmset('user::{}'.format(pk), kwargs)
        # object.__setattr__ is used to bypass this class's overridden
        # __setattr__, which would otherwise write back to redis.
        super(User, self).__setattr__('pk', pk)
        super(User, self).__setattr__(
            '__info__',
            db1.hgetall(self.db_key) or {}
        )
        # redis returns bytes; normalise the cache to unicode text.
        for k, v in self.__info__.iteritems():
            self.__info__[k] = v.decode('utf-8')
    @property
    def short_info(self):
        # Public profile subset used for listings.
        return {field: getattr(self, field) for field in [
            'fio',
            'sex',
            'avatar',
            'battles',
            'wins',
            'defeats',
            'last_update'
        ]}
    @property
    def db_key(self):
        # Redis hash key backing this user.
        return 'user::{}'.format(self.pk)
    @property
    def fio(self):
        # Full name, "<last> <first>"; missing parts collapse to ''.
        return u'{} {}'.format(self.last_name or u'', self.first_name or u'')
    @property
    def battles(self):
        return int(self.__info__.get('battles', 0))
    @property
    def wins(self):
        return int(self.__info__.get('wins', 0))
    @property
    def defeats(self):
        return int(self.__info__.get('defeats', 0))
    @property
    def last_update(self):
        return int(self.__info__.get('last_update', 0))
    def __setattr__(self, attr, value):
        # Write-through: update the local cache and persist immediately.
        self.__info__[attr] = value
        db1.hset(self.db_key, attr, value)
    def __getattr__(self, attr):
        # Only called for attributes not found normally; unknown fields
        # yield None rather than AttributeError.
        return self.__info__.get(attr)
    def incr(self, attr, by=1):
        # Atomic counter increment in redis. NOTE(review): the __info__
        # cache is not refreshed here -- confirm callers re-read after.
        db1.hincrby(self.db_key, attr, by)
def get_user_by_service(service, service_user_id):
    """Resolve a service-specific user id to a User, or None if unmapped."""
    mapping_key = '{}_user_id::{}'.format(service, service_user_id)
    pk = db1.get(mapping_key)
    return User(pk=pk) if pk else None
def add_service_to_user(service, service_user_id, user_pk):
    """Link a service account to an existing user, in both directions."""
    mapping_key = '{}_user_id::{}'.format(service, service_user_id)
    db1.set(mapping_key, user_pk)
    # User.__setattr__ persists the reverse mapping on the user hash.
    user = User(pk=user_pk)
    setattr(user, '{}_user_id'.format(service), service_user_id)
| beslave/space-king | space_king/models/user.py | Python | gpl-3.0 | 1,986 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import htpc
import logging
import requests
from cherrypy.lib.auth2 import require, member_of
from urllib import urlencode
from json import loads
from htpc.helpers import get_image, serve_template, fix_basepath
from StringIO import StringIO
from contextlib import closing
class Mylar(object):
    """CherryPy endpoints proxying HTPC-Manager requests to a Mylar
    comic-manager instance through Mylar's HTTP API."""
    def __init__(self):
        self.logger = logging.getLogger('modules.mylar')
        # Register this module's settings form with HTPC-Manager.
        htpc.MODULES.append({
            'name': 'Mylar',
            'id': 'mylar',
            'test': htpc.WEBDIR + 'mylar/ping',
            'fields': [
                {'type': 'bool', 'label': 'Enable', 'name': 'mylar_enable'},
                {'type': 'text', 'label': 'Menu name', 'name': 'mylar_name'},
                {'type': 'text', 'label': 'IP / Host *', 'name': 'mylar_host'},
                {'type': 'text', 'label': 'Port *', 'name': 'mylar_port'},
                {'type': 'text', 'label': 'Basepath', 'name': 'mylar_basepath'},
                {'type': 'text', 'label': 'API key', 'name': 'mylar_apikey'},
                {'type': 'bool', 'label': 'Use SSL', 'name': 'mylar_ssl'},
                {"type": "text", "label": "Reverse proxy link", "placeholder": "", "desc": "Reverse proxy link ex: https://hp.domain.com", "name": "mylar_reverse_proxy_link"}
            ]
        })
    @cherrypy.expose()
    @require()
    def index(self):
        # Render the module's main page template.
        return serve_template('mylar.html',
                              scriptname='mylar',
                              webinterface=Mylar.webinterface()
                              )
    @cherrypy.expose()
    @require()
    def GetThumb(self, url=None, thumb=None, h=None, w=None, o=100):
        """ Parse thumb to get the url and send to htpc.proxy.get_image """
        self.logger.debug("Trying to fetch image via %s", url)
        if url is None and thumb is None:
            # To stop if the image is missing
            return
        # Should never used thumb, to lazy to remove it
        if thumb:
            url = thumb
        return get_image(url, h, w, o)
    @cherrypy.expose()
    @require()
    def viewcomic(self, artist_id):
        # Detail page for a single comic: status labels are pre-rendered
        # server-side via _get_status_icon.
        response = self.fetch('getComic&id=%s' % artist_id)
        for a in response['comic']:
            a['StatusText'] = _get_status_icon(a['Status'])
            a['can_download'] = True if a['Status'] not in ('Downloaded', 'Snatched', 'Wanted') else False
        template = htpc.LOOKUP.get_template('mylar_view_comic.html')
        return template.render(
            scriptname='mylar_view_comic',
            comic_id=artist_id,
            comic=response['comic'][0],
            comicimg=response['comic'][0]['ComicImageURL'],
            issues=response['issues'],
            description=response['comic'][0]['Description'],
            module_name=htpc.settings.get('mylar_name', 'Mylar')
        )
    @staticmethod
    def _build_url(ssl=None, host=None, port=None, base_path=None):
        # Build the base URL of the Mylar instance from settings, with
        # per-call overrides for ssl/host/port.
        # NOTE(review): the base_path parameter is accepted but never
        # used -- the settings value is always taken; confirm intended.
        ssl = ssl or htpc.settings.get('mylar_ssl')
        host = host or htpc.settings.get('mylar_host')
        port = port or htpc.settings.get('mylar_port')
        path = fix_basepath(htpc.settings.get('mylar_basepath', '/'))
        url = '{protocol}://{host}:{port}{path}'.format(
            protocol='https' if ssl else 'http',
            host=host,
            port=port,
            path=path,
        )
        return url
    @staticmethod
    def webinterface():
        # URL shown to the user; a configured reverse-proxy link wins.
        url = Mylar._build_url()
        if htpc.settings.get('mylar_reverse_proxy_link'):
            url = htpc.settings.get('mylar_reverse_proxy_link')
        return url
    @staticmethod
    def _build_api_url(command, url=None, api_key=None):
        # `command` may carry extra '&key=value' pairs appended by callers.
        return '{url}api?apikey={api_key}&cmd={command}'.format(
            url=url or Mylar._build_url(),
            api_key=api_key or htpc.settings.get('mylar_apikey'),
            command=command,
        )
    @cherrypy.expose()
    @cherrypy.tools.json_out()
    @require()
    def getserieslist(self):
        return self.fetch('getIndex')
    @cherrypy.expose()
    @cherrypy.tools.json_out()
    @require()
    def GetWantedList(self):
        return self.fetch('getWanted')
    @cherrypy.expose()
    @cherrypy.tools.json_out()
    @require()
    def SearchForComic(self, name):
        return self.fetch('findComic&%s' % urlencode({'name': name.encode(encoding='UTF-8', errors='strict')}))
    @cherrypy.expose()
    @require()
    def RefreshComic(self, Id):
        return self.fetch('refreshComic&id=%s' % Id, text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def DeleteComic(self, Id):
        return self.fetch('delComic&id=%s' % Id, text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def PauseComic(self, Id):
        return self.fetch('pauseComic&id=%s' % Id, text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def ResumeComic(self, Id):
        return self.fetch('resumeComic&id=%s' % Id, text=True)
    @cherrypy.expose()
    @require()
    def QueueIssue(self, issueid=None, new=False, **kwargs):
        # Force check
        if new:
            return self.fetch('queueIssue&id=%s&new=True' % issueid, text=True)
        return self.fetch('queueIssue&id=%s' % issueid, text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def UnqueueIssue(self, issueid, name=''):
        self.logger.debug('unqued %s' % name)
        return self.fetch('unqueueIssue&id=%s' % issueid, text=True)
    @cherrypy.expose()
    @require()
    def DownloadIssue(self, issueid, name=''):
        """ downloads a issue via api and returns it to the browser """
        self.logger.debug('Downloading issue %s' % name)
        getfile = self.fetch('downloadIssue&id=%s' % issueid, img=True)
        try:
            with closing(StringIO()) as f:
                f = StringIO()
                f.write(getfile)
                return cherrypy.lib.static.serve_fileobj(f.getvalue(), content_type='application/x-download', disposition=None, name=name, debug=False)
        except Exception as e:
            self.logger.error('Failed to download %s %s %s' % (name, issueid, e))
    @cherrypy.expose()
    @cherrypy.tools.json_out()
    @require()
    def AddComic(self, id, **kwargs):
        self.logger.debug('Added %s to mylar' % kwargs.get('name', ''))
        return self.fetch('addComic&id=%s' % id)
    @cherrypy.expose()
    @cherrypy.tools.json_out()
    @require()
    def GetHistoryList(self):
        return self.fetch('getHistory')
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def ForceSearch(self):
        return self.fetch('forceSearch', text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def ForceProcess(self, dir_=None):
        # NOTE(review): '?' inside the command yields
        # 'cmd=forceProcess?dir_=...' in the final URL, which looks
        # suspect ('&' expected) -- verify against Mylar's API.
        if dir_:
            return self.fetch('forceProcess?dir_=%s' % dir_, text=True)
        return self.fetch('forceProcess', text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def ForceActiveArtistsUpdate(self):
        return self.fetch('forceActiveComicsUpdate', text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def ShutDown(self):
        return self.fetch('shutdown', text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def UpDate(self):
        return self.fetch('update', text=True)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def ReStart(self):
        return self.fetch('restart', text=True)
    def fetch(self, command, url=None, api_key=None, img=False, json=True, text=False):
        """Call the Mylar API and return the response as JSON (default),
        raw text or raw bytes; returns None on a non-200 status or error."""
        url = Mylar._build_api_url(command, url, api_key)
        try:
            if img or text:
                json = False
            result = ''
            self.logger.debug('calling api @ %s' % url)
            # set a high timeout as some requests take a while..
            # NOTE(review): SSL verification is disabled -- presumably to
            # allow self-signed certificates; confirm this is intended.
            response = requests.get(url, timeout=120, verify=False)
            if response.status_code != 200:
                self.logger.error('failed to contact mylar')
                return
            if text:
                result = response.text
            if img:
                result = response.content
            if json:
                result = response.json()
            #self.logger.debug('Response: %s' % result)
            return result
        except Exception as e:
            self.logger.error("Error calling api %s: %s" % (url, e))
    @cherrypy.tools.json_out()
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    def ping(self,
             mylar_enable, mylar_name,
             mylar_host, mylar_port,
             mylar_basepath,
             mylar_apikey,
             mylar_ssl=False,
             mylar_reverse_proxy_link=None):
        # Settings-form connectivity test: query the version endpoint with
        # the (possibly unsaved) form values.
        url = Mylar._build_url(
            mylar_ssl,
            mylar_host,
            mylar_port,
            mylar_basepath,
        )
        return self.fetch('getVersion', url, mylar_apikey)
def _get_status_icon(status):
green = ["Downloaded", "Active", "Processed"]
orange = ["Snatched"]
blue = ["Wanted"]
red = ["Unprocessed"]
mapsicon = {
'Downloaded': 'fa fa-download',
'Active': 'fa fa-rotate-right',
'Error': 'fa fa-bell-o',
'Paused': 'fa fa-pause',
'Snatched': 'fa fa-share-alt',
'Skipped': 'fa fa-fast-forward',
'Wanted': 'fa fa-heart',
'Processed': 'fa fa-check',
'Unprocessed': 'fa fa-exclamation-circle'
}
if not status:
return ''
label = ''
if status in green:
label = 'label-success'
elif status in orange:
label = 'label-warning'
elif status in blue:
label = 'label-info'
elif status in red:
label = 'label-error'
else:
pass
fmt = '<span class="label %s"><i class="%s icon-white"></i> %s</span>'
return fmt % (label, mapsicon[status], status)
| scith/htpc-manager_ynh | sources/modules/mylar.py | Python | gpl-3.0 | 9,922 | 0.001008 |
"""Custom keras layers
"""
# Coding: utf-8
# File name: custom_layer.py
# Created: 2016-07-24
# Description:
## v0.0: File created. MergeRowDot layer.
from __future__ import division
from __future__ import print_function
__author__ = "Hoang Nguyen"
__email__ = "hoangnt@ai.cs.titech.ac.jp"
from keras import backend as K
from keras.engine.topology import Merge
import numpy as np
# >>> BEGIN CLASS RowDot <<<
class RowDot(Merge):
    """
    Merge layer computing a row-wise dot product: element-wise multiply
    of the two inputs followed by a sum along the second axis.
    """
    ##################################################################### __init__
    def __init__(self, layers=None, **kwargs):
        """
        Init function.

        Forwards *layers* to Merge; the previous version hard-coded
        ``layers=None`` and silently discarded the caller's argument.
        """
        super(RowDot, self).__init__(layers=layers, **kwargs)
    ######################################################################### call
    def call(self, inputs, **kwargs):
        """
        Layer logic: batch_dot over axis 1 of both inputs.
        """
        # Debug prints and unused locals removed from the original.
        return K.batch_dot(inputs[0], inputs[1], axes=[1, 1])
# === End CLASS MergeRowDot <<<
# >>> BEGIN HELPER FUNCTIONS <<<
############################################################################ dot
| gear/motifwalk | research/src/mane/custom_layers.py | Python | mit | 1,273 | 0.013354 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
import datetime
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ComplexOpsTest(PandasOnSparkTestCase, TestCasesUtils):
    """Operator tests for ArrayType/StructType pandas-on-Spark series,
    checked against the behaviour of plain pandas.

    Fix: the comparison tests previously bound ``pdf, psdf`` both to
    ``self.complex_pdf`` (pandas compared against itself), so the
    pandas-on-Spark side was never exercised; they now use
    ``self.complex_psdf`` and share one helper.
    """

    @property
    def pser(self):
        return pd.Series([[1, 2, 3]])

    @property
    def psser(self):
        return ps.from_pandas(self.pser)

    @property
    def numeric_array_pdf(self):
        psers = {
            "int": pd.Series([[1, 2, 3]]),
            "float": pd.Series([[0.1, 0.2, 0.3]]),
            "decimal": pd.Series([[decimal.Decimal(1), decimal.Decimal(2), decimal.Decimal(3)]]),
        }
        return pd.concat(psers, axis=1)

    @property
    def numeric_array_psdf(self):
        return ps.from_pandas(self.numeric_array_pdf)

    @property
    def numeric_array_df_cols(self):
        return self.numeric_array_pdf.columns

    @property
    def non_numeric_array_pdf(self):
        psers = {
            "string": pd.Series([["x", "y", "z"]]),
            "date": pd.Series(
                [[datetime.date(1994, 1, 1), datetime.date(1994, 1, 2), datetime.date(1994, 1, 3)]]
            ),
            "bool": pd.Series([[True, True, False]]),
        }
        return pd.concat(psers, axis=1)

    @property
    def non_numeric_array_psdf(self):
        return ps.from_pandas(self.non_numeric_array_pdf)

    @property
    def non_numeric_array_df_cols(self):
        return self.non_numeric_array_pdf.columns

    @property
    def array_pdf(self):
        return pd.concat([self.numeric_array_pdf, self.non_numeric_array_pdf], axis=1)

    @property
    def array_psdf(self):
        return ps.from_pandas(self.array_pdf)

    @property
    def array_df_cols(self):
        return self.array_pdf.columns

    @property
    def complex_pdf(self):
        psers = {
            "this_array": self.pser,
            "that_array": pd.Series([[2, 3, 4]]),
            "this_struct": pd.Series([("x", 1)]),
            "that_struct": pd.Series([("a", 2)]),
        }
        return pd.concat(psers, axis=1)

    @property
    def complex_psdf(self):
        pssers = {
            "this_array": self.psser,
            "that_array": ps.Series([[2, 3, 4]]),
            "this_struct": ps.Index([("x", 1)]).to_series().reset_index(drop=True),
            "that_struct": ps.Index([("a", 2)]).to_series().reset_index(drop=True),
        }
        return ps.concat(pssers, axis=1)

    def test_add(self):
        pdf, psdf = self.array_pdf, self.array_psdf
        for col in self.array_df_cols:
            self.assert_eq(pdf[col] + pdf[col], psdf[col] + psdf[col])

        # Numeric array + Numeric array
        for col in self.numeric_array_df_cols:
            pser1, psser1 = pdf[col], psdf[col]
            for other_col in self.numeric_array_df_cols:
                pser2, psser2 = pdf[other_col], psdf[other_col]
                self.assert_eq((pser1 + pser2).sort_values(), (psser1 + psser2).sort_values())

        # Non-numeric array + Non-numeric array
        self.assertRaises(
            TypeError,
            lambda: psdf["string"] + psdf["bool"],
        )
        self.assertRaises(
            TypeError,
            lambda: psdf["string"] + psdf["date"],
        )
        self.assertRaises(
            TypeError,
            lambda: psdf["bool"] + psdf["date"],
        )
        for col in self.non_numeric_array_df_cols:
            pser, psser = pdf[col], psdf[col]
            self.assert_eq(pser + pser, psser + psser)

        # Numeric array + Non-numeric array
        for numeric_col in self.numeric_array_df_cols:
            for non_numeric_col in self.non_numeric_array_df_cols:
                self.assertRaises(TypeError, lambda: psdf[numeric_col] + psdf[non_numeric_col])

    def test_sub(self):
        self.assertRaises(TypeError, lambda: self.psser - "x")
        self.assertRaises(TypeError, lambda: self.psser - 1)
        psdf = self.array_psdf
        for col in self.array_df_cols:
            for other_col in self.array_df_cols:
                self.assertRaises(TypeError, lambda: psdf[col] - psdf[other_col])

    def test_mul(self):
        self.assertRaises(TypeError, lambda: self.psser * "x")
        self.assertRaises(TypeError, lambda: self.psser * 1)
        psdf = self.array_psdf
        for col in self.array_df_cols:
            for other_col in self.array_df_cols:
                self.assertRaises(TypeError, lambda: psdf[col] * psdf[other_col])

    def test_truediv(self):
        self.assertRaises(TypeError, lambda: self.psser / "x")
        self.assertRaises(TypeError, lambda: self.psser / 1)
        psdf = self.array_psdf
        for col in self.array_df_cols:
            for other_col in self.array_df_cols:
                self.assertRaises(TypeError, lambda: psdf[col] / psdf[other_col])

    def test_floordiv(self):
        self.assertRaises(TypeError, lambda: self.psser // "x")
        self.assertRaises(TypeError, lambda: self.psser // 1)
        psdf = self.array_psdf
        for col in self.array_df_cols:
            for other_col in self.array_df_cols:
                self.assertRaises(TypeError, lambda: psdf[col] // psdf[other_col])

    def test_mod(self):
        self.assertRaises(TypeError, lambda: self.psser % "x")
        self.assertRaises(TypeError, lambda: self.psser % 1)
        psdf = self.array_psdf
        for col in self.array_df_cols:
            for other_col in self.array_df_cols:
                self.assertRaises(TypeError, lambda: psdf[col] % psdf[other_col])

    def test_pow(self):
        self.assertRaises(TypeError, lambda: self.psser ** "x")
        self.assertRaises(TypeError, lambda: self.psser ** 1)
        psdf = self.array_psdf
        for col in self.array_df_cols:
            for other_col in self.array_df_cols:
                self.assertRaises(TypeError, lambda: psdf[col] ** psdf[other_col])

    def test_radd(self):
        self.assertRaises(TypeError, lambda: "x" + self.psser)
        self.assertRaises(TypeError, lambda: 1 + self.psser)

    def test_rsub(self):
        self.assertRaises(TypeError, lambda: "x" - self.psser)
        self.assertRaises(TypeError, lambda: 1 - self.psser)

    def test_rmul(self):
        self.assertRaises(TypeError, lambda: "x" * self.psser)
        self.assertRaises(TypeError, lambda: 2 * self.psser)

    def test_rtruediv(self):
        self.assertRaises(TypeError, lambda: "x" / self.psser)
        self.assertRaises(TypeError, lambda: 1 / self.psser)

    def test_rfloordiv(self):
        self.assertRaises(TypeError, lambda: "x" // self.psser)
        self.assertRaises(TypeError, lambda: 1 // self.psser)

    def test_rmod(self):
        self.assertRaises(TypeError, lambda: 1 % self.psser)

    def test_rpow(self):
        self.assertRaises(TypeError, lambda: "x" ** self.psser)
        self.assertRaises(TypeError, lambda: 1 ** self.psser)

    def test_and(self):
        self.assertRaises(TypeError, lambda: self.psser & True)
        self.assertRaises(TypeError, lambda: self.psser & False)
        self.assertRaises(TypeError, lambda: self.psser & self.psser)

    def test_rand(self):
        self.assertRaises(TypeError, lambda: True & self.psser)
        self.assertRaises(TypeError, lambda: False & self.psser)

    def test_or(self):
        self.assertRaises(TypeError, lambda: self.psser | True)
        self.assertRaises(TypeError, lambda: self.psser | False)
        self.assertRaises(TypeError, lambda: self.psser | self.psser)

    def test_ror(self):
        self.assertRaises(TypeError, lambda: True | self.psser)
        self.assertRaises(TypeError, lambda: False | self.psser)

    def test_from_to_pandas(self):
        pdf, psdf = self.array_pdf, self.array_psdf
        for col in self.array_df_cols:
            pser, psser = pdf[col], psdf[col]
            self.assert_eq(pser, psser.to_pandas())
            self.assert_eq(ps.from_pandas(pser), psser)

    def test_isnull(self):
        pdf, psdf = self.array_pdf, self.array_psdf
        for col in self.array_df_cols:
            pser, psser = pdf[col], psdf[col]
            self.assert_eq(pser.isnull(), psser.isnull())

    def test_astype(self):
        self.assert_eq(self.pser.astype(str), self.psser.astype(str))

    def test_neg(self):
        self.assertRaises(TypeError, lambda: -self.psser)

    def test_abs(self):
        self.assertRaises(TypeError, lambda: abs(self.psser))

    def test_invert(self):
        self.assertRaises(TypeError, lambda: ~self.psser)

    def _check_comparison(self, op):
        # Shared body for the six comparison tests: apply *op* to array
        # and struct columns (cross-column and same-column) and check the
        # pandas-on-Spark result against plain pandas.
        pdf, psdf = self.complex_pdf, self.complex_psdf
        self.assert_eq(
            op(pdf["this_array"], pdf["that_array"]),
            op(psdf["this_array"], psdf["that_array"]),
        )
        self.assert_eq(
            op(pdf["this_struct"], pdf["that_struct"]),
            op(psdf["this_struct"], psdf["that_struct"]),
        )
        self.assert_eq(
            op(pdf["this_array"], pdf["this_array"]),
            op(psdf["this_array"], psdf["this_array"]),
        )
        self.assert_eq(
            op(pdf["this_struct"], pdf["this_struct"]),
            op(psdf["this_struct"], psdf["this_struct"]),
        )

    def test_eq(self):
        self._check_comparison(lambda left, right: left == right)

    def test_ne(self):
        self._check_comparison(lambda left, right: left != right)

    def test_lt(self):
        self._check_comparison(lambda left, right: left < right)

    def test_le(self):
        self._check_comparison(lambda left, right: left <= right)

    def test_gt(self):
        self._check_comparison(lambda left, right: left > right)

    def test_ge(self):
        self._check_comparison(lambda left, right: left >= right)
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.data_type_ops.test_complex_ops import *  # noqa: F401
    try:
        # Prefer xmlrunner so CI can collect JUnit-style XML reports.
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        # Fall back to unittest's default text runner when xmlrunner is absent.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| chuckchen/spark | python/pyspark/pandas/tests/data_type_ops/test_complex_ops.py | Python | apache-2.0 | 13,290 | 0.002859 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools import float_compare
from openerp.tools.translate import _
class account_asset_category(osv.osv):
    """Depreciation profile shared by assets.

    Groups the accounting configuration (asset/depreciation/expense
    accounts, journal) and the default depreciation-board parameters
    (method, number of periods, period length, prorata) that are copied
    onto each asset of this category.
    """
    _name = 'account.asset.category'
    _description = 'Asset category'
    _columns = {
        'name': fields.char('Name', required=True, select=1),
        'note': fields.text('Note'),
        'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
        'account_asset_id': fields.many2one('account.account', 'Asset Account', required=True, domain=[('type','=','other')]),
        'account_depreciation_id': fields.many2one('account.account', 'Depreciation Account', required=True, domain=[('type','=','other')]),
        'account_expense_depreciation_id': fields.many2one('account.account', 'Depr. Expense Account', required=True, domain=[('type','=','other')]),
        'journal_id': fields.many2one('account.journal', 'Journal', required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, help="Choose the method to use to compute the amount of depreciation lines.\n"\
            "  * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
            "  * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
        'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
        'method_period': fields.integer('Period Length', help="State here the time between 2 depreciations, in months", required=True),
        'method_progress_factor': fields.float('Degressive Factor'),
        'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
                                  help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
                                       "  * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
                                       "  * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
        'method_end': fields.date('Ending date'),
        'prorata':fields.boolean('Prorata Temporis', help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first January'),
        'open_asset': fields.boolean('Skip Draft State', help="Check this if you want to automatically confirm the assets of this category when created by invoices."),
    }
    _defaults = {
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.category', context=context),
        'method': 'linear',
        'method_number': 5,
        'method_time': 'number',
        'method_period': 12,
        'method_progress_factor': 0.3,
    }
    def onchange_account_asset(self, cr, uid, ids, account_asset_id, context=None):
        """UI onchange: default the depreciation account to the asset account."""
        res = {'value':{}}
        if account_asset_id:
            res['value'] = {'account_depreciation_id': account_asset_id}
        return res
class account_asset_asset(osv.osv):
    """A depreciable asset.

    Holds the purchase information, the depreciation parameters (defaulted
    from the category) and the generated depreciation board
    (``depreciation_line_ids``).  Workflow: draft -> open (running) ->
    close; closing happens automatically when the residual value reaches
    zero (see ``account_asset_depreciation_line.create_move``).
    """
    _name = 'account.asset.asset'
    _description = 'Asset'
    def unlink(self, cr, uid, ids, context=None):
        # Forbid deletion once accounting entries are linked to the asset.
        for asset in self.browse(cr, uid, ids, context=context):
            if asset.account_move_line_ids:
                raise osv.except_osv(_('Error!'), _('You cannot delete an asset that contains posted depreciation lines.'))
        return super(account_asset_asset, self).unlink(cr, uid, ids, context=context)
    def _get_period(self, cr, uid, context=None):
        # Default period: the first open fiscal period found, or False.
        periods = self.pool.get('account.period').find(cr, uid, context=context)
        if periods:
            return periods[0]
        else:
            return False
    def _get_last_depreciation_date(self, cr, uid, ids, context=None):
        """
        @param id: ids of a account.asset.asset objects
        @return: Returns a dictionary of the effective dates of the last depreciation entry made for given asset ids. If there isn't any, return the purchase date of this asset
        """
        cr.execute("""
            SELECT a.id as id, COALESCE(MAX(l.date),a.purchase_date) AS date
            FROM account_asset_asset a
            LEFT JOIN account_move_line l ON (l.asset_id = a.id)
            WHERE a.id IN %s
            GROUP BY a.id, a.purchase_date """, (tuple(ids),))
        return dict(cr.fetchall())
    def _compute_board_amount(self, cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=None):
        """Amount of the i-th depreciation line.

        Linear: amount_to_depr split evenly over the remaining lines.
        Degressive: residual * progress factor.  With prorata, the first
        and last lines are weighted by the fraction of the year remaining
        after ``depreciation_date`` (day-of-year based).  The last line
        always absorbs the full remaining residual to avoid rounding drift.
        """
        #by default amount = 0
        amount = 0
        if i == undone_dotation_number:
            amount = residual_amount
        else:
            if asset.method == 'linear':
                amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
                if asset.prorata:
                    amount = amount_to_depr / asset.method_number
                    # days left in the year after the depreciation date
                    days = total_days - float(depreciation_date.strftime('%j'))
                    if i == 1:
                        amount = (amount_to_depr / asset.method_number) / total_days * days
                    elif i == undone_dotation_number:
                        amount = (amount_to_depr / asset.method_number) / total_days * (total_days - days)
            elif asset.method == 'degressive':
                amount = residual_amount * asset.method_progress_factor
                if asset.prorata:
                    days = total_days - float(depreciation_date.strftime('%j'))
                    if i == 1:
                        amount = (residual_amount * asset.method_progress_factor) / total_days * days
                    elif i == undone_dotation_number:
                        amount = (residual_amount * asset.method_progress_factor) / total_days * (total_days - days)
        return amount
    def _compute_board_undone_dotation_nb(self, cr, uid, asset, depreciation_date, total_days, context=None):
        """Number of depreciation lines still to generate.

        'number' time method: simply ``method_number``.  'end' time method:
        count whole periods between ``depreciation_date`` and
        ``method_end``.  Prorata adds one extra (partial) line.
        """
        undone_dotation_number = asset.method_number
        if asset.method_time == 'end':
            end_date = datetime.strptime(asset.method_end, '%Y-%m-%d')
            undone_dotation_number = 0
            while depreciation_date <= end_date:
                depreciation_date = (datetime(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+asset.method_period))
                undone_dotation_number += 1
        if asset.prorata:
            undone_dotation_number += 1
        return undone_dotation_number
    def compute_depreciation_board(self, cr, uid, ids, context=None):
        """(Re)generate the depreciation board for each asset.

        Keeps posted lines (those with a move), deletes unposted ones, and
        recreates future lines from the residual value starting at the
        appropriate date (purchase date with prorata, otherwise Jan 1st of
        the purchase year or last posted line + one period).
        """
        depreciation_lin_obj = self.pool.get('account.asset.depreciation.line')
        currency_obj = self.pool.get('res.currency')
        for asset in self.browse(cr, uid, ids, context=context):
            if asset.value_residual == 0.0:
                continue
            posted_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_check', '=', True)],order='depreciation_date desc')
            old_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_id', '=', False)])
            if old_depreciation_line_ids:
                depreciation_lin_obj.unlink(cr, uid, old_depreciation_line_ids, context=context)
            amount_to_depr = residual_amount = asset.value_residual
            if asset.prorata:
                depreciation_date = datetime.strptime(self._get_last_depreciation_date(cr, uid, [asset.id], context)[asset.id], '%Y-%m-%d')
            else:
                # depreciation_date = 1st January of purchase year
                purchase_date = datetime.strptime(asset.purchase_date, '%Y-%m-%d')
                #if we already have some previous validated entries, starting date isn't 1st January but last entry + method period
                if (len(posted_depreciation_line_ids)>0):
                    last_depreciation_date = datetime.strptime(depreciation_lin_obj.browse(cr,uid,posted_depreciation_line_ids[0],context=context).depreciation_date, '%Y-%m-%d')
                    depreciation_date = (last_depreciation_date+relativedelta(months=+asset.method_period))
                else:
                    depreciation_date = datetime(purchase_date.year, 1, 1)
            day = depreciation_date.day
            month = depreciation_date.month
            year = depreciation_date.year
            # NOTE(review): `year % 4` is only an approximation of leap years
            # (wrong for century years such as 1900 or 2100).
            total_days = (year % 4) and 365 or 366
            undone_dotation_number = self._compute_board_undone_dotation_nb(cr, uid, asset, depreciation_date, total_days, context=context)
            for x in range(len(posted_depreciation_line_ids), undone_dotation_number):
                i = x + 1
                amount = self._compute_board_amount(cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=context)
                residual_amount -= amount
                vals = {
                    'amount': amount,
                    'asset_id': asset.id,
                    'sequence': i,
                    'name': str(asset.id) +'/' + str(i),
                    'remaining_value': residual_amount,
                    'depreciated_value': (asset.purchase_value - asset.salvage_value) - (residual_amount + amount),
                    'depreciation_date': depreciation_date.strftime('%Y-%m-%d'),
                }
                depreciation_lin_obj.create(cr, uid, vals, context=context)
                # Considering Depr. Period as months
                depreciation_date = (datetime(year, month, day) + relativedelta(months=+asset.method_period))
                day = depreciation_date.day
                month = depreciation_date.month
                year = depreciation_date.year
        return True
    def validate(self, cr, uid, ids, context=None):
        # Workflow action: draft -> open (running).
        if context is None:
            context = {}
        return self.write(cr, uid, ids, {
            'state':'open'
        }, context)
    def set_to_close(self, cr, uid, ids, context=None):
        # Workflow action: mark the asset as closed.
        return self.write(cr, uid, ids, {'state': 'close'}, context=context)
    def set_to_draft(self, cr, uid, ids, context=None):
        # Workflow action: reset the asset to draft.
        return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
    def _amount_residual(self, cr, uid, ids, name, args, context=None):
        """Function field: residual value = gross value - salvage value -
        total already depreciated (sum of |debit-credit| of the linked move
        lines, converted from company to asset currency)."""
        cr.execute("""SELECT
                l.asset_id as id, SUM(abs(l.debit-l.credit)) AS amount
            FROM
                account_move_line l
            WHERE
                l.asset_id IN %s GROUP BY l.asset_id """, (tuple(ids),))
        res=dict(cr.fetchall())
        for asset in self.browse(cr, uid, ids, context):
            company_currency = asset.company_id.currency_id.id
            current_currency = asset.currency_id.id
            amount = self.pool['res.currency'].compute(cr, uid, company_currency, current_currency, res.get(asset.id, 0.0), context=context)
            res[asset.id] = asset.purchase_value - amount - asset.salvage_value
        # Assets without any move line get a 0.0 residual placeholder.
        for id in ids:
            res.setdefault(id, 0.0)
        return res
    def onchange_company_id(self, cr, uid, ids, company_id=False, context=None):
        """UI onchange: align the asset currency with the selected company."""
        val = {}
        if company_id:
            company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
            if company.currency_id.company_id and company.currency_id.company_id.id != company_id:
                val['currency_id'] = False
            else:
                val['currency_id'] = company.currency_id.id
        return {'value': val}
    def onchange_purchase_salvage_value(self, cr, uid, ids, purchase_value, salvage_value, context=None):
        """UI onchange: recompute the displayed residual value."""
        val = {}
        for asset in self.browse(cr, uid, ids, context=context):
            if purchase_value:
                val['value_residual'] = purchase_value - salvage_value
            if salvage_value:
                val['value_residual'] = purchase_value - salvage_value
        return {'value': val}
    def _entry_count(self, cr, uid, ids, field_name, arg, context=None):
        # Function field: number of journal items linked to each asset.
        MoveLine = self.pool('account.move.line')
        return {
            asset_id: MoveLine.search_count(cr, uid, [('asset_id', '=', asset_id)], context=context)
            for asset_id in ids
        }
    _columns = {
        'account_move_line_ids': fields.one2many('account.move.line', 'asset_id', 'Entries', readonly=True, states={'draft':[('readonly',False)]}),
        'entry_count': fields.function(_entry_count, string='# Asset Entries', type='integer'),
        'name': fields.char('Asset Name', required=True, readonly=True, states={'draft':[('readonly',False)]}),
        'code': fields.char('Reference', size=32, readonly=True, states={'draft':[('readonly',False)]}),
        'purchase_value': fields.float('Gross Value', required=True, readonly=True, states={'draft':[('readonly',False)]}),
        'currency_id': fields.many2one('res.currency','Currency',required=True, readonly=True, states={'draft':[('readonly',False)]}),
        'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
        'note': fields.text('Note'),
        'category_id': fields.many2one('account.asset.category', 'Asset Category', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
        'parent_id': fields.many2one('account.asset.asset', 'Parent Asset', readonly=True, states={'draft':[('readonly',False)]}),
        'child_ids': fields.one2many('account.asset.asset', 'parent_id', 'Children Assets', copy=True),
        'purchase_date': fields.date('Purchase Date', required=True, readonly=True, states={'draft':[('readonly',False)]}),
        'state': fields.selection([('draft','Draft'),('open','Running'),('close','Close')], 'Status', required=True, copy=False,
                                  help="When an asset is created, the status is 'Draft'.\n" \
                                       "If the asset is confirmed, the status goes in 'Running' and the depreciation lines can be posted in the accounting.\n" \
                                       "You can manually close an asset when the depreciation is over. If the last line of depreciation is posted, the asset automatically goes in that status."),
        'active': fields.boolean('Active'),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True, states={'draft':[('readonly',False)]}),
        'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="Choose the method to use to compute the amount of depreciation lines.\n"\
            "  * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
            "  * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
        'method_number': fields.integer('Number of Depreciations', readonly=True, states={'draft':[('readonly',False)]}, help="The number of depreciations needed to depreciate your asset"),
        'method_period': fields.integer('Number of Months in a Period', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The amount of time between two depreciations, in months"),
        'method_end': fields.date('Ending Date', readonly=True, states={'draft':[('readonly',False)]}),
        'method_progress_factor': fields.float('Degressive Factor', readonly=True, states={'draft':[('readonly',False)]}),
        'value_residual': fields.function(_amount_residual, method=True, digits_compute=dp.get_precision('Account'), string='Residual Value'),
        'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True, readonly=True, states={'draft':[('readonly',False)]},
                                  help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
                                       "  * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
                                       "  * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
        'prorata':fields.boolean('Prorata Temporis', readonly=True, states={'draft':[('readonly',False)]}, help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first January'),
        'history_ids': fields.one2many('account.asset.history', 'asset_id', 'History', readonly=True),
        'depreciation_line_ids': fields.one2many('account.asset.depreciation.line', 'asset_id', 'Depreciation Lines', readonly=True, states={'draft':[('readonly',False)],'open':[('readonly',False)]}),
        'salvage_value': fields.float('Salvage Value', digits_compute=dp.get_precision('Account'), help="It is the amount you plan to have that you cannot depreciate.", readonly=True, states={'draft':[('readonly',False)]}),
    }
    _defaults = {
        'code': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.asset.code'),
        'purchase_date': lambda obj, cr, uid, context: time.strftime('%Y-%m-%d'),
        'active': True,
        'state': 'draft',
        'method': 'linear',
        'method_number': 5,
        'method_time': 'number',
        'method_period': 12,
        'method_progress_factor': 0.3,
        'currency_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.currency_id.id,
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.asset',context=context),
    }
    def _check_recursion(self, cr, uid, ids, context=None, parent=None):
        # Guard against parent_id cycles (delegates to the ORM helper).
        return super(account_asset_asset, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
    def _check_prorata(self, cr, uid, ids, context=None):
        # Prorata only makes sense with the 'number' time method.
        for asset in self.browse(cr, uid, ids, context=context):
            if asset.prorata and asset.method_time != 'number':
                return False
        return True
    _constraints = [
        (_check_recursion, 'Error ! You cannot create recursive assets.', ['parent_id']),
        (_check_prorata, 'Prorata temporis can be applied only for time method "number of depreciations".', ['prorata']),
    ]
    def onchange_category_id(self, cr, uid, ids, category_id, context=None):
        """UI onchange: copy the depreciation defaults from the category."""
        res = {'value':{}}
        asset_categ_obj = self.pool.get('account.asset.category')
        if category_id:
            category_obj = asset_categ_obj.browse(cr, uid, category_id, context=context)
            res['value'] = {
                            'method': category_obj.method,
                            'method_number': category_obj.method_number,
                            'method_time': category_obj.method_time,
                            'method_period': category_obj.method_period,
                            'method_progress_factor': category_obj.method_progress_factor,
                            'method_end': category_obj.method_end,
                            'prorata': category_obj.prorata,
            }
        return res
    def onchange_method_time(self, cr, uid, ids, method_time='number', context=None):
        """UI onchange: clear prorata when it is no longer applicable."""
        res = {'value': {}}
        if method_time != 'number':
            res['value'] = {'prorata': False}
        return res
    def _compute_entries(self, cr, uid, ids, period_id, context=None):
        """Post the unposted depreciation lines falling inside the period."""
        result = []
        period_obj = self.pool.get('account.period')
        depreciation_obj = self.pool.get('account.asset.depreciation.line')
        period = period_obj.browse(cr, uid, period_id, context=context)
        depreciation_ids = depreciation_obj.search(cr, uid, [('asset_id', 'in', ids), ('depreciation_date', '<=', period.date_stop), ('depreciation_date', '>=', period.date_start), ('move_check', '=', False)], context=context)
        # Force the accounting date of the generated moves to the period end.
        context = dict(context or {}, depreciation_date=period.date_stop)
        return depreciation_obj.create_move(cr, uid, depreciation_ids, context=context)
    def create(self, cr, uid, vals, context=None):
        # Build the depreciation board as soon as the asset exists.
        asset_id = super(account_asset_asset, self).create(cr, uid, vals, context=context)
        self.compute_depreciation_board(cr, uid, [asset_id], context=context)
        return asset_id
    def open_entries(self, cr, uid, ids, context=None):
        """Window action: show the journal items linked to these assets."""
        context = dict(context or {}, search_default_asset_id=ids, default_asset_id=ids)
        return {
            'name': _('Journal Items'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.move.line',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'context': context,
        }
class account_asset_depreciation_line(osv.osv):
    """One line of an asset's depreciation board.

    A line is 'posted' once an accounting move has been generated for it
    (``move_id`` set; mirrored by the ``move_check`` function field).
    """
    _name = 'account.asset.depreciation.line'
    _description = 'Asset depreciation line'
    def _get_move_check(self, cr, uid, ids, name, args, context=None):
        # Function field: a line counts as posted iff it has a move.
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = bool(line.move_id)
        return res
    _columns = {
        'name': fields.char('Depreciation Name', required=True, select=1),
        'sequence': fields.integer('Sequence', required=True),
        'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True, ondelete='cascade'),
        'parent_state': fields.related('asset_id', 'state', type='char', string='State of Asset'),
        'amount': fields.float('Current Depreciation', digits_compute=dp.get_precision('Account'), required=True),
        'remaining_value': fields.float('Next Period Depreciation', digits_compute=dp.get_precision('Account'),required=True),
        'depreciated_value': fields.float('Amount Already Depreciated', required=True),
        'depreciation_date': fields.date('Depreciation Date', select=1),
        'move_id': fields.many2one('account.move', 'Depreciation Entry'),
        'move_check': fields.function(_get_move_check, method=True, type='boolean', string='Posted', store=True)
    }
    def create_move(self, cr, uid, ids, context=None):
        """Generate one accounting move per depreciation line.

        Each move carries two lines balancing the (company-currency)
        depreciation amount: one on the category's accumulated-depreciation
        account and one on its depreciation-expense account (which side is
        debited depends on the sign of the amount).  Assets whose residual
        value drops to zero afterwards are closed.  Returns the created
        move ids.
        """
        context = dict(context or {})
        can_close = False
        asset_obj = self.pool.get('account.asset.asset')
        period_obj = self.pool.get('account.period')
        move_obj = self.pool.get('account.move')
        move_line_obj = self.pool.get('account.move.line')
        currency_obj = self.pool.get('res.currency')
        created_move_ids = []
        asset_ids = []
        for line in self.browse(cr, uid, ids, context=context):
            # Accounting date: context override, else the line's own date, else today.
            depreciation_date = context.get('depreciation_date') or line.depreciation_date or time.strftime('%Y-%m-%d')
            period_ids = period_obj.find(cr, uid, depreciation_date, context=context)
            company_currency = line.asset_id.company_id.currency_id.id
            current_currency = line.asset_id.currency_id.id
            context.update({'date': depreciation_date})
            # Convert the asset-currency amount to the company currency.
            amount = currency_obj.compute(cr, uid, current_currency, company_currency, line.amount, context=context)
            sign = (line.asset_id.category_id.journal_id.type == 'purchase' and 1) or -1
            asset_name = "/"
            reference = line.asset_id.name
            move_vals = {
                'name': asset_name,
                'date': depreciation_date,
                'ref': reference,
                'period_id': period_ids and period_ids[0] or False,
                'journal_id': line.asset_id.category_id.journal_id.id,
                }
            move_id = move_obj.create(cr, uid, move_vals, context=context)
            journal_id = line.asset_id.category_id.journal_id.id
            partner_id = line.asset_id.partner_id.id
            prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
            # Accumulated-depreciation side.
            move_line_obj.create(cr, uid, {
                'name': asset_name,
                'ref': reference,
                'move_id': move_id,
                'account_id': line.asset_id.category_id.account_depreciation_id.id,
                'debit': 0.0 if float_compare(amount, 0.0, precision_digits=prec) > 0 else -amount,
                'credit': amount if float_compare(amount, 0.0, precision_digits=prec) > 0 else 0.0,
                'period_id': period_ids and period_ids[0] or False,
                'journal_id': journal_id,
                'partner_id': partner_id,
                'currency_id': company_currency != current_currency and current_currency or False,
                'amount_currency': company_currency != current_currency and - sign * line.amount or 0.0,
                'date': depreciation_date,
            })
            # Expense side; also carries the analytic account and asset link.
            move_line_obj.create(cr, uid, {
                'name': asset_name,
                'ref': reference,
                'move_id': move_id,
                'account_id': line.asset_id.category_id.account_expense_depreciation_id.id,
                'credit': 0.0 if float_compare(amount, 0.0, precision_digits=prec) > 0 else -amount,
                'debit': amount if float_compare(amount, 0.0, precision_digits=prec) > 0 else 0.0,
                'period_id': period_ids and period_ids[0] or False,
                'journal_id': journal_id,
                'partner_id': partner_id,
                'currency_id': company_currency != current_currency and current_currency or False,
                'amount_currency': company_currency != current_currency and sign * line.amount or 0.0,
                'analytic_account_id': line.asset_id.category_id.account_analytic_id.id,
                'date': depreciation_date,
                'asset_id': line.asset_id.id
            })
            self.write(cr, uid, line.id, {'move_id': move_id}, context=context)
            created_move_ids.append(move_id)
            asset_ids.append(line.asset_id.id)
        # we re-evaluate the assets to determine whether we can close them
        for asset in asset_obj.browse(cr, uid, list(set(asset_ids)), context=context):
            if currency_obj.is_zero(cr, uid, asset.currency_id, asset.value_residual):
                asset.write({'state': 'close'})
        return created_move_ids
class account_move_line(osv.osv):
    """Extend journal items with a back-link to the asset they depreciate.

    ``ondelete="restrict"`` prevents deleting an asset that still has
    journal items (enforced at the database level).
    """
    _inherit = 'account.move.line'
    _columns = {
        'asset_id': fields.many2one('account.asset.asset', 'Asset', ondelete="restrict"),
    }
class account_asset_history(osv.osv):
    """Audit trail of changes to an asset's depreciation parameters.

    Each record snapshots the method parameters at a point in time, newest
    first (``_order = 'date desc'``).
    """
    _name = 'account.asset.history'
    _description = 'Asset history'
    _columns = {
        'name': fields.char('History name', select=1),
        'user_id': fields.many2one('res.users', 'User', required=True),
        'date': fields.date('Date', required=True),
        'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True),
        'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
                                  help="The method to use to compute the dates and number of depreciation lines.\n"\
                                       "Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
                                       "Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
        'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
        'method_period': fields.integer('Period Length', help="Time in month between two depreciations"),
        'method_end': fields.date('Ending date'),
        'note': fields.text('Note'),
    }
    _order = 'date desc'
    _defaults = {
        'date': lambda *args: time.strftime('%Y-%m-%d'),
        'user_id': lambda self, cr, uid, ctx: uid
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| shingonoide/odoo | addons/account_asset/account_asset.py | Python | agpl-3.0 | 29,332 | 0.008557 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: a declarative dict read by the module loader.
{
    'name': 'Stock Multic Fix',
    'version': '8.0.1.0.1',
    'category': 'Warehouse Management',
    'sequence': 14,
    'summary': '',
    'description': """
Stock Multic Fix
==================================
""",
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    'images': [
    ],
    # Modules that must be installed before this one.
    'depends': [
        'stock_account',
    ],
    # XML data files loaded on install/update.
    'data': ['stock_view.xml'
    ],
    'demo': [
    ],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
    'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | sysadminmatmoz/ingadhoc | stock_multic_fix/__openerp__.py | Python | agpl-3.0 | 1,568 | 0.001276 |
from button import Button
class SellButton(Button):
    """Button that sells the tower owned by its parent when clicked.

    The boilerplate ``__init__`` that only forwarded ``(image, x, y,
    parent)`` to ``Button.__init__`` has been removed; the inherited
    constructor is used directly, so the construction signature is
    unchanged.
    """

    def get_clicked(self):
        """Handle a click by asking the parent to sell its tower."""
        self.parent.sell_tower()
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteRegistration
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-domains
# [START domains_v1_generated_Domains_DeleteRegistration_sync]
from google.cloud import domains_v1
def sample_delete_registration():
    """Delete a Cloud Domains registration and print the final result.

    ``delete_registration`` returns a long-running operation;
    ``operation.result()`` blocks until it completes (or raises on
    failure).  Replace ``"name_value"`` with a real registration resource
    name before running.
    """
    # Create a client
    client = domains_v1.DomainsClient()
    # Initialize request argument(s)
    request = domains_v1.DeleteRegistrationRequest(
        name="name_value",
    )
    # Make the request
    operation = client.delete_registration(request=request)
    print("Waiting for operation to complete...")
    response = operation.result()
    # Handle the response
    print(response)
# [END domains_v1_generated_Domains_DeleteRegistration_sync]
| googleapis/python-domains | samples/generated_samples/domains_v1_generated_domains_delete_registration_sync.py | Python | apache-2.0 | 1,539 | 0.00065 |
"""Defines chart-wide shared test fixtures."""
import numpy as np
import pandas as pd
import pytest
from bokeh.sampledata.autompg import autompg
class TestData(object):
    """Bundles the datasets shared across the chart test modules in
    several equivalent representations (lists, arrays, dict, DataFrame,
    records) plus the bokeh autompg sample set."""

    def __init__(self):
        self.cat_list = ['a', 'c', 'a', 'b']
        self.list_data = [[1, 2, 3, 4], [2, 3, 4, 5]]
        self.array_data = [np.array(col) for col in self.list_data]
        first, second = self.list_data
        self.dict_data = {'col1': first, 'col2': second}
        self.pd_data = pd.DataFrame(self.dict_data)
        self.records_data = self.pd_data.to_dict(orient='records')
        self.auto_data = autompg
@pytest.fixture(scope='module')
def test_data():
    """Module-scoped bundle of the shared chart test datasets."""
    return TestData()
@pytest.fixture(scope='module')
def wide_data_with_cat(test_data):
    """Wide-format dict data extended with a categorical third column."""
    widened = dict(test_data.dict_data)
    widened['col3'] = test_data.cat_list
    return widened
@pytest.fixture(scope='module')
def df_with_cat_index(test_data):
    """DataFrame over the shared dict data, indexed by the categorical labels."""
    frame = pd.DataFrame(test_data.dict_data, index=test_data.cat_list)
    return frame
| htygithub/bokeh | bokeh/charts/conftest.py | Python | bsd-3-clause | 1,071 | 0.000934 |
#!/usr/bin/env python
import asyncio
import os
import signal
import websockets
async def echo(websocket):
    """Send every incoming message back to the sender until the connection closes."""
    async for message in websocket:
        await websocket.send(message)
async def main():
    """Serve the echo handler until the process receives SIGTERM."""
    # Set the stop condition when receiving SIGTERM.
    loop = asyncio.get_running_loop()
    stop = loop.create_future()
    loop.add_signal_handler(signal.SIGTERM, stop.set_result, None)
    async with websockets.serve(
        echo,
        host="localhost",
        # Port = 8000 + last two characters of the supervisor process name
        # (assumes names end in a two-digit worker index, e.g. "...-02" ->
        # 8002, so each worker gets its own port — confirm supervisor config).
        port=8000 + int(os.environ["SUPERVISOR_PROCESS_NAME"][-2:]),
    ):
        await stop
if __name__ == "__main__":
    # Run the server until SIGTERM resolves the stop future.
    asyncio.run(main())
| aaugustin/websockets | example/deployment/haproxy/app.py | Python | bsd-3-clause | 616 | 0 |
#! /usr/bin/env python
# Print digits of pi forever.
#
# The algorithm, using Python's 'long' integers ("bignums"), works
# with continued fractions, and was conceived by Lambert Meertens.
#
# See also the ABC Programmer's Handbook, by Geurts, Meertens & Pemberton,
# published by Prentice-Hall (UK) Ltd., 1990.
import sys
def pi_digits():
    """Yield the decimal digits of pi one at a time, without end.

    Implements Lambert Meertens' continued-fraction algorithm: two
    rational approximations a/b and a1/b1 bracket pi, and a digit is
    emitted whenever both approximations agree on it.
    """
    # The original used Python 2 'L' long literals (2L, 4L, ...); plain
    # ints auto-promote to arbitrary precision, so dropping the suffix is
    # behavior-identical on Python 2 and also valid on Python 3.
    k, a, b, a1, b1 = 2, 4, 1, 12, 4
    while True:
        # Next approximation
        p, q, k = k*k, 2*k+1, k+1
        a, b, a1, b1 = a1, b1, p*a+q*a1, p*b+q*b1
        # Emit the digits both approximations agree on
        d, d1 = a//b, a1//b1
        while d == d1:
            yield d
            a, a1 = 10*(a%b), 10*(a1%b1)
            d, d1 = a//b, a1//b1


def main():
    """Print digits of pi forever (original behavior preserved)."""
    for d in pi_digits():
        output(d)
def output(d):
    """Write a single digit to stdout immediately, with no separator."""
    stream = sys.stdout
    # write() avoids the spaces print would insert; str() renders the digit
    stream.write(str(d))
    # Flush so each digit appears as soon as it is computed
    stream.flush()
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Demo/scripts/pi.py | Python | gpl-2.0 | 928 | 0.005388 |
"""
Views related to the video upload feature
"""
from boto import s3
import csv
from uuid import uuid4
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.http import require_GET, require_http_methods
import rfc6266
from edxval.api import create_video, get_videos_for_ids, SortDirection, VideoSortField
from opaque_keys.edx.keys import CourseKey
from contentstore.models import VideoUploadConfig
from contentstore.utils import reverse_course_url
from edxmako.shortcuts import render_to_response
from util.json_request import expect_json, JsonResponse
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.django import modulestore
from .course import get_course_and_check_access
__all__ = ["videos_handler", "video_encodings_download"]
# String constant used in asset keys to identify video assets.
VIDEO_ASSET_TYPE = "video"

# Default expiration, in seconds, of one-time URLs used for uploading videos
# (24 hours).
KEY_EXPIRATION_IN_SECONDS = 86400
class StatusDisplayStrings(object):
    """
    A class to map status strings as stored in VAL to display strings for the
    video upload page
    """
    # NOTE: the previous version had trailing commas after the _FAILED and
    # _INVALID_TOKEN assignments, which made them one-element tuples instead
    # of strings — so those statuses rendered as tuples and broke the
    # ugettext translation call in get().

    # Translators: This is the status of an active video upload
    _UPLOADING = ugettext_noop("Uploading")
    # Translators: This is the status for a video that the servers are currently processing
    _IN_PROGRESS = ugettext_noop("In Progress")
    # Translators: This is the status for a video that the servers have successfully processed
    _COMPLETE = ugettext_noop("Complete")
    # Translators: This is the status for a video that the servers have failed to process
    _FAILED = ugettext_noop("Failed")
    # Translators: This is the status for a video for which an invalid
    # processing token was provided in the course settings
    _INVALID_TOKEN = ugettext_noop("Invalid Token")
    # Translators: This is the status for a video that is in an unknown state
    _UNKNOWN = ugettext_noop("Unknown")

    # Raw VAL pipeline states -> display strings (unknown states fall back
    # to _UNKNOWN in get()).
    _STATUS_MAP = {
        "upload": _UPLOADING,
        "ingest": _IN_PROGRESS,
        "transcode_queue": _IN_PROGRESS,
        "transcode_active": _IN_PROGRESS,
        "file_delivered": _COMPLETE,
        "file_complete": _COMPLETE,
        "file_corrupt": _FAILED,
        "pipeline_error": _FAILED,
        "invalid_token": _INVALID_TOKEN
    }

    @staticmethod
    def get(val_status):
        """Map a VAL status string to a localized display string"""
        return _(StatusDisplayStrings._STATUS_MAP.get(val_status, StatusDisplayStrings._UNKNOWN))
@expect_json
@login_required
@require_http_methods(("GET", "POST"))
def videos_handler(request, course_key_string):
    """
    The restful handler for video uploads.

    GET
        html: return an HTML page to display previous video uploads and allow
            new ones
        json: return json representing the videos that have been uploaded and
            their statuses
    POST
        json: create a new video upload; the actual files should not be provided
            to this endpoint but rather PUT to the respective upload_url values
            contained in the response
    """
    course = _get_and_validate_course(course_key_string, request.user)
    if not course:
        return HttpResponseNotFound()

    if request.method != "GET":
        return videos_post(course, request)

    # Content negotiation: JSON for API clients, HTML for the browser UI.
    if "application/json" in request.META.get("HTTP_ACCEPT", ""):
        return videos_index_json(course)
    return videos_index_html(course)
@login_required
@require_GET
def video_encodings_download(request, course_key_string):
    """
    Returns a CSV report containing the encoded video URLs for video uploads
    in the following format:

    Video ID,Name,Status,Profile1 URL,Profile2 URL
    aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
    """
    course = _get_and_validate_course(course_key_string, request.user)
    if not course:
        return HttpResponseNotFound()

    def get_profile_header(profile):
        """Returns the column header string for the given profile's URLs"""
        # Translators: This is the header for a CSV file column
        # containing URLs for video encodings for the named profile
        # (e.g. desktop, mobile high quality, mobile low quality)
        return _("{profile_name} URL").format(profile_name=profile)

    # Only profiles on the whitelist get a URL column in the report.
    profile_whitelist = VideoUploadConfig.get_profile_whitelist()

    videos = list(_get_videos(course))
    # Localized column headers; profile columns are appended after the
    # fixed metadata columns.
    name_col = _("Name")
    duration_col = _("Duration")
    added_col = _("Date Added")
    video_id_col = _("Video ID")
    status_col = _("Status")
    profile_cols = [get_profile_header(profile) for profile in profile_whitelist]

    def make_csv_dict(video):
        """
        Makes a dictionary suitable for writing CSV output. This involves
        extracting the required items from the original video dict and
        converting all keys and values to UTF-8 encoded string objects,
        because the CSV module doesn't play well with unicode objects.
        """
        # Translators: This is listed as the duration for a video that has not
        # yet reached the point in its processing by the servers where its
        # duration is determined.
        duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
        ret = dict(
            [
                (name_col, video["client_video_id"]),
                (duration_col, duration_val),
                (added_col, video["created"].isoformat()),
                (video_id_col, video["edx_video_id"]),
                (status_col, video["status"]),
            ] +
            [
                (get_profile_header(encoded_video["profile"]), encoded_video["url"])
                for encoded_video in video["encoded_videos"]
                if encoded_video["profile"] in profile_whitelist
            ]
        )
        # Python 2 csv writers require byte strings, not unicode.
        return {
            key.encode("utf-8"): value.encode("utf-8")
            for key, value in ret.items()
        }

    # Stream the CSV directly into the HttpResponse (it is file-like).
    response = HttpResponse(content_type="text/csv")
    # Translators: This is the suggested filename when downloading the URL
    # listing for videos uploaded through Studio
    filename = _("{course}_video_urls").format(course=course.id.course)
    # See https://tools.ietf.org/html/rfc6266#appendix-D
    response["Content-Disposition"] = rfc6266.build_header(
        filename + ".csv",
        filename_compat="video_urls.csv"
    )
    writer = csv.DictWriter(
        response,
        [
            col_name.encode("utf-8")
            for col_name
            in [name_col, duration_col, added_col, video_id_col, status_col] + profile_cols
        ],
        dialect=csv.excel
    )
    writer.writeheader()
    for video in videos:
        writer.writerow(make_csv_dict(video))
    return response
def _get_and_validate_course(course_key_string, user):
    """
    Given a course key, return the course if it exists, the given user has
    access to it, and it is properly configured for video uploads
    """
    course_key = CourseKey.from_string(course_key_string)

    # For now, assume all studio users that have access to the course can upload videos.
    # In the future, we plan to add a new org-level role for video uploaders.
    course = get_course_and_check_access(course_key, user)

    upload_feature_enabled = settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"]
    pipeline_settings = getattr(settings, "VIDEO_UPLOAD_PIPELINE", None)
    if upload_feature_enabled and pipeline_settings and course and course.video_pipeline_configured:
        return course
    return None
def _get_videos(course):
    """
    Retrieves the list of videos from VAL corresponding to the videos listed in
    the asset metadata store.
    """
    asset_metadata = modulestore().get_all_asset_metadata(course.id, VIDEO_ASSET_TYPE)
    edx_videos_ids = [metadata.asset_id.path for metadata in asset_metadata]
    videos = list(get_videos_for_ids(edx_videos_ids, VideoSortField.created, SortDirection.desc))

    # Replace VAL's raw pipeline status with the Studio display string.
    for video in videos:
        video["status"] = StatusDisplayStrings.get(video["status"])

    return videos
def _get_index_videos(course):
    """
    Returns the information about each video upload required for the video list
    """
    # Only the attributes the list page actually renders.
    attrs = ["edx_video_id", "client_video_id", "created", "duration", "status"]
    return [
        {attr: video[attr] for attr in attrs}
        for video in _get_videos(course)
    ]
def videos_index_html(course):
    """
    Returns an HTML page to display previous video uploads and allow new ones
    """
    return render_to_response(
        "videos_index.html",
        {
            "context_course": course,
            # POST target for creating new uploads (handled by videos_handler).
            "post_url": reverse_course_url("videos_handler", unicode(course.id)),
            "encodings_download_url": reverse_course_url("video_encodings_download", unicode(course.id)),
            "previous_uploads": _get_index_videos(course),
            # Upper bound on simultaneous client-side uploads; defaults to 0
            # when unset (semantics defined by the uploader frontend).
            "concurrent_upload_limit": settings.VIDEO_UPLOAD_PIPELINE.get("CONCURRENT_UPLOAD_LIMIT", 0),
        }
    )
def videos_index_json(course):
    """
    Returns JSON in the following format:
        {
            "videos": [{
                "edx_video_id": "aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa",
                "client_video_id": "video.mp4",
                "created": "1970-01-01T00:00:00Z",
                "duration": 42.5,
                "status": "upload"
            }]
        }
    """
    payload = {"videos": _get_index_videos(course)}
    return JsonResponse(payload, status=200)
def videos_post(course, request):
    """
    Input (JSON):
    {
        "files": [{
            "file_name": "video.mp4",
            "content_type": "video/mp4"
        }]
    }
    Returns (JSON):
    {
        "files": [{
            "file_name": "video.mp4",
            "upload_url": "http://example.com/put_video"
        }]
    }
    The returned array corresponds exactly to the input array.
    """
    # Validate the request shape before touching S3 or VAL.
    error = None
    if "files" not in request.json:
        error = "Request object is not JSON or does not contain 'files'"
    elif any(
        "file_name" not in file or "content_type" not in file
        for file in request.json["files"]
    ):
        error = "Request 'files' entry does not contain 'file_name' and 'content_type'"
    if error:
        return JsonResponse({"error": error}, status=400)

    bucket = storage_service_bucket()
    course_video_upload_token = course.video_upload_pipeline["course_video_upload_token"]
    req_files = request.json["files"]
    resp_files = []

    for req_file in req_files:
        file_name = req_file["file_name"]

        # The S3 object is keyed by a fresh UUID, not the client file name;
        # the original name travels along as S3 metadata.
        edx_video_id = unicode(uuid4())
        key = storage_service_key(bucket, file_name=edx_video_id)
        for metadata_name, value in [
            ("course_video_upload_token", course_video_upload_token),
            ("client_video_id", file_name),
            ("course_key", unicode(course.id)),
        ]:
            key.set_metadata(metadata_name, value)
        # One-time pre-signed PUT URL; expires after KEY_EXPIRATION_IN_SECONDS.
        upload_url = key.generate_url(
            KEY_EXPIRATION_IN_SECONDS,
            "PUT",
            headers={"Content-Type": req_file["content_type"]}
        )

        # persist edx_video_id as uploaded through this course
        video_meta_data = AssetMetadata(course.id.make_asset_key(VIDEO_ASSET_TYPE, edx_video_id))
        modulestore().save_asset_metadata(video_meta_data, request.user.id)

        # persist edx_video_id in VAL
        create_video({
            "edx_video_id": edx_video_id,
            "status": "upload",
            "client_video_id": file_name,
            "duration": 0,
            "encoded_videos": [],
        })

        resp_files.append({"file_name": file_name, "upload_url": upload_url})

    return JsonResponse({"files": resp_files}, status=200)
def storage_service_bucket():
    """
    Returns an S3 bucket for video uploads.
    """
    connection = s3.connection.S3Connection(
        settings.AWS_ACCESS_KEY_ID,
        settings.AWS_SECRET_ACCESS_KEY
    )
    bucket_name = settings.VIDEO_UPLOAD_PIPELINE["BUCKET"]
    return connection.get_bucket(bucket_name)
def storage_service_key(bucket, file_name):
    """
    Returns an S3 key to the given file in the given bucket.
    """
    # Keys live under the pipeline's configured root path (empty by default).
    root_path = settings.VIDEO_UPLOAD_PIPELINE.get("ROOT_PATH", "")
    key_name = "{}/{}".format(root_path, file_name)
    return s3.key.Key(bucket, key_name)
| mtlchun/edx | cms/djangoapps/contentstore/views/videos.py | Python | agpl-3.0 | 12,550 | 0.001594 |
import click
from do_cli.contexts import CTX
from do_cli.commands.common import host_commands
@click.command('list')
@click.option('-f', '--force-refresh', is_flag=True, help='Pull data from the API')
@click.option('-h', '--host-names', help='Comma separated list of host names')
@CTX
def cli(ctx, force_refresh, host_names):
    """
    Show minimal data for droplets

    --host-names -h   Comma separated list of host names
                      Show minimal data for specific droplets
    """
    verbose = ctx.verbose
    if verbose:
        click.echo("Show minimal data for droplets")

    click.echo(host_commands(ctx, force_refresh, host_names))

    if verbose:
        click.echo('---- cmd_list done ----')
| meganlkm/do-cli | do_cli/commands/cmd_list.py | Python | mit | 689 | 0.001451 |
import base64
import csv
import io
import multiprocessing
import numpy as np
import sys
from collections import defaultdict
from io import StringIO
from pathlib import Path
# Import matplotlib ourselves and make it use agg (not any GUI anything)
# before the analyze module pulls it in.
import matplotlib
matplotlib.use('Agg')
from bottle import get, post, redirect, request, response, jinja2_template as template # noqa: E402
from analysis import heatmaps, process, plot # noqa: E402
from web.error_handlers import TrackParseError # noqa: E402
from common import mkdir # noqa: E402
import config # noqa: E402
def _make_stats_output(stats, all_keys, do_csv):
    """Render per-track statistics as an HTML page or a CSV download.

    Args:
        stats: list of per-track stat dicts; mutated in place (numpy floats
            formatted to 3 decimals, missing keys filled with "").
        all_keys: set of every column name seen across tracks; 'Track file'
            is pulled to the front, the rest sorted alphabetically.
        do_csv: truthy -> return CSV text with attachment headers,
            falsy -> render the 'stats' template.
    """
    # Normalize every stat dict so the table/CSV is rectangular.
    for stat in stats:
        for key in all_keys:
            if key in stat:
                val = stat[key]
                if isinstance(val, (np.float32, np.float64)):
                    stat[key] = "%0.3f" % val
            else:
                stat[key] = ""

    # 'Track file' is always added by callers; discard() (rather than
    # remove()) avoids a KeyError if a future caller omits it.
    all_keys.discard('Track file')
    all_keys = sorted(all_keys)
    all_keys[:0] = ['Track file']  # prepend 'Track file' header

    if do_csv:
        output = StringIO()
        writer = csv.DictWriter(output, fieldnames=all_keys)
        writer.writeheader()
        writer.writerows(stats)
        csvstring = output.getvalue()
        output.close()

        response.content_type = 'text/csv'
        response.headers['Content-Disposition'] = 'attachment; filename=atles_stats.csv'
        return csvstring

    return template('stats', keys=all_keys, stats=stats)
@get('/stats/')
def get_stats():
    """Collect and render summary statistics for the selected tracks.

    Query params: 'tracks' is a |-separated list of track files relative to
    config.TRACKDIR; 'exp_type' optionally adds experiment-specific stats;
    'csv' switches the response to a CSV download.
    """
    trackrels = request.query.tracks.split('|')
    exp_type = request.query.exp_type
    stats = []
    # Union of all column names seen across tracks (tracks may differ).
    all_keys = set()
    for trackrel in trackrels:
        curstats = {}
        curstats['Track file'] = trackrel
        try:
            processor = process.TrackProcessor(str(config.TRACKDIR / trackrel))
            curstats.update(processor.get_setup(['experiment', 'phases', 'general']))
            curstats.update(processor.get_stats_single_table(include_phases=True))
            if exp_type:
                curstats.update(processor.get_exp_stats(exp_type))
        except (ValueError, IndexError):
            # often 'wrong number of columns' due to truncated file from killed experiment
            raise(TrackParseError(trackrel, sys.exc_info()))
        all_keys.update(curstats.keys())
        stats.append(curstats)

    return _make_stats_output(stats, all_keys, do_csv=request.query.csv)
def _do_analyze(trackrel):
    """Generate all analysis plots for one track file under config.PLOTDIR."""
    trackrel = Path(trackrel)

    # ensure directories exist for plot creation
    trackreldir = trackrel.parent
    mkdir(config.PLOTDIR / trackreldir)

    # look for debug frames to create links in the trace plot
    trackname = trackrel.name.replace('-track.csv', '')
    dbgframedir = config.DBGFRAMEDIR / trackreldir / trackname
    dbgframes = list(dbgframedir.glob("subframe*.png"))  # list so TrackPlotter can re-use (instead of exhausting the iterable)

    processor = process.TrackProcessor(str(config.TRACKDIR / trackrel))
    plotter = plot.TrackPlotter(processor, dbgframes)
    plotter.plot_heatmap()

    def saveplot(filename):
        # All plots land in PLOTDIR; the numeric infix orders them in listings.
        plot.savefig(str(config.PLOTDIR / filename))

    saveplot("{}.10.heat.png".format(trackrel))
    plotter.plot_invalidheatmap()
    saveplot("{}.12.heat.invalid.png".format(trackrel))
    # Per-phase/per-minute breakdowns only make sense with multiple phases.
    if processor.num_phases() > 1:
        plotter.plot_heatmap(plot_type='per-phase')
        saveplot("{}.14.heat.perphase.png".format(trackrel))
    plotter.plot_heatmap(plot_type='per-minute')
    saveplot("{}.15.heat.perminute.png".format(trackrel))
    plotter.plot_trace()
    saveplot("{}.20.plot.svg".format(trackrel))
@post('/analyze/')
def post_analyze():
    """Analyze one track synchronously, then redirect to its view page."""
    trackrel = request.query.trackrel
    try:
        _do_analyze(trackrel)
    except ValueError:
        # often 'wrong number of columns' due to truncated file from killed experiment
        raise TrackParseError(trackrel, sys.exc_info())
    else:
        redirect("/view/{}".format(trackrel))
def _analyze_selection(trackrels):
    """Analyze several tracks, silently skipping ones that fail to parse.

    Runs in a background process, so parse errors cannot be reported to
    the user; they are simply ignored.
    """
    for trackrel in trackrels:
        try:
            _do_analyze(trackrel)
        except ValueError:
            # often 'wrong number of columns' from a truncated track file
            continue
@post('/analyze_selection/')
def post_analyze_selection():
    """Kick off background analysis of the selected tracks and return at once."""
    trackrels = request.query.trackrels.split('|')
    # Fire-and-forget worker process; results appear as plot files later.
    p = multiprocessing.Process(target=_analyze_selection, args=(trackrels,))
    p.start()
@get('/heatmaps/')
def get_heatmaps():
    """Render per-phase position heatmaps aggregated over selected tracks.

    All selected tracks must share identical phase lengths; otherwise an
    error page listing the mismatched setups is returned.
    """
    trackrels = request.query.tracks.split('|')
    processors = []
    # to verify all phases are equivalent
    plength_map = defaultdict(list)
    for trackrel in trackrels:
        try:
            p = process.TrackProcessor(str(config.TRACKDIR / trackrel), just_raw_data=True)
            processors.append(p)
            plength_map[tuple(phase.length for phase in p.phase_list)].append(trackrel)
        except ValueError:
            raise(TrackParseError(trackrel, sys.exc_info()))

    if len(plength_map) > 1:
        # Group the offending tracks by their phase-length signature so the
        # user can see which setups disagree.
        lengths_string = '\n'.join(
            "{} in:\n    {}\n".format(
                str(lengths),
                "\n    ".join(trackrel for trackrel in plength_map[lengths])
            )
            for lengths in plength_map
        )
        return template('error', errormsg="The provided tracks do not all have the same phase lengths.  Please select tracks that share an experimental setup.<br>Phase lengths found:<pre>{}</pre>".format(lengths_string))

    # Save all images as binary to be included in the page directly
    # Base64-encoded.  (Saves having to write temporary data to filesystem.)
    images_data = []

    # use phases from an arbitrary track
    plengths = plength_map.popitem()[0]

    dataframes = [proc.df for proc in processors]

    phase_start = 0
    for i, length in enumerate(plengths):
        phase_end = phase_start + length
        # Phase boundaries are in minutes; the data is indexed in seconds.
        x, y = heatmaps.get_timeslice(dataframes, phase_start*60, phase_end*60)
        title = "Phase {} ({}:00-{}:00)".format(i+1, phase_start, phase_end)
        ax = heatmaps.make_heatmap(x, y, title)
        plot.format_axis(ax)
        image_data = io.BytesIO()
        plot.savefig(image_data, format='png')
        images_data.append(
            base64.b64encode(image_data.getvalue()).decode()
        )
        phase_start = phase_end

    return template('view', imgdatas=images_data)
| liffiton/ATLeS | src/web/controller_analyze.py | Python | mit | 6,534 | 0.00153 |
# project/models.py
from project import db
from project.uuid_gen import id_column
class Payment(db.Model):
    """A single card payment record.

    NOTE(review): cardNumber is persisted as plain text — confirm whether
    this is acceptable or should be tokenized/encrypted.
    """
    # Primary key generated by project.uuid_gen.id_column (module name
    # suggests UUID-based ids — confirm).
    id = id_column()
    email = db.Column(db.String(255), unique=False, nullable=False)
    names = db.Column(db.String(255), unique=False, nullable=False)
    cardNumber = db.Column(db.String(255), unique=False, nullable=False)
    phone = db.Column(db.String(255), unique=False, nullable=False)
    amount = db.Column(db.Float, unique=False, nullable=False)
    # Free-form label; presumably what the payment is for — confirm usage.
    object_payment = db.Column(db.String(255), unique=False, nullable=False)
    # False until the payment is confirmed.
    status = db.Column(db.Boolean, nullable=False, default=False)

    def __init__(self, email, names, card_number, phone, amount, object_payment, status=False):
        self.names = names
        self.email = email
        self.cardNumber = card_number
        self.phone = phone
        self.amount = amount
        self.object_payment = object_payment
        self.status = status
| fiston/abaganga | project/payment/models.py | Python | mit | 932 | 0.001073 |
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
import subprocess
import traceback
import yardstick.ssh as ssh
import basemonitor as basemonitor
LOG = logging.getLogger(__name__)
def _execute_shell_command(command):
'''execute shell script with error handling'''
exitcode = 0
output = []
try:
output = subprocess.check_output(command, shell=True)
except Exception:
exitcode = -1
output = traceback.format_exc()
LOG.error("exec command '%s' error:\n " % command)
LOG.error(traceback.format_exc())
return exitcode, output
class MonitorOpenstackCmd(basemonitor.BaseMonitor):
    """Availability monitor that probes OpenStack by running a CLI command.

    Runs ``command_name`` either over SSH on the configured host node or
    locally, and reports the service as down when the command exits
    non-zero.
    """
    __monitor_type__ = "openstack-cmd"

    def setup(self):
        # Open an SSH session when a 'host' node is configured; otherwise
        # commands run on the local machine via _execute_shell_command.
        self.connection = None
        node_name = self._config.get("host", None)
        if node_name:
            host = self._context[node_name]
            ip = host.get("ip", None)
            user = host.get("user", "root")
            key_filename = host.get("key_filename", "~/.ssh/id_rsa")
            self.connection = ssh.SSH(user, ip, key_filename=key_filename)
            self.connection.wait(timeout=600)
            LOG.debug("ssh host success!")

        self.check_script = self.get_script_fullpath(
            "ha_tools/check_openstack_cmd.bash")

        self.cmd = self._config["command_name"]

    def monitor_func(self):
        """Run the probe command once; return True if it succeeded."""
        exit_status = 0
        if self.connection:
            # Remote case: stream the helper script into bash over SSH,
            # passing the command as its positional argument.
            exit_status, stdout, stderr = self.connection.execute(
                "/bin/bash -s '{0}'".format(self.cmd),
                stdin=open(self.check_script, "r"))
            LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
                      (exit_status, stdout, stderr))
        else:
            exit_status, stdout = _execute_shell_command(self.cmd)
        if exit_status:
            return False
        return True

    def verify_SLA(self):
        """Compare the measured outage time against the configured SLA."""
        outage_time = self._result.get('outage_time', None)
        # NOTE(review): outage_time may be None if 'outage_time' is missing
        # from _result; None > number raises TypeError on Python 3 —
        # confirm the result dict always carries this key.
        LOG.debug("the _result:%s" % self._result)
        max_outage_time = self._config["sla"]["max_outage_time"]
        if outage_time > max_outage_time:
            LOG.info("SLA failure: %f > %f" % (outage_time, max_outage_time))
            return False
        else:
            LOG.info("the sla is passed")
            return True
def _test():  # pragma: no cover
    """Manual smoke test: monitor 'nova image-list' on a hard-coded node."""
    host = {
        "ip": "192.168.235.22",
        "user": "root",
        "key_filename": "/root/.ssh/id_rsa"
    }
    context = {"node1": host}

    monitor_configs = []
    config = {
        'monitor_type': 'openstack-cmd',
        'command_name': 'nova image-list',
        'monitor_time': 1,
        'host': 'node1',
        'sla': {'max_outage_time': 5}
    }
    monitor_configs.append(config)

    # Drive the full monitor lifecycle: init, run, wait, SLA check.
    p = basemonitor.MonitorMgr()
    p.init_monitors(monitor_configs, context)
    p.start_monitors()
    p.wait_monitors()
    p.verify_SLA()


if __name__ == '__main__':    # pragma: no cover
    _test()
| dtudares/hello-world | yardstick/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py | Python | apache-2.0 | 3,388 | 0 |
#coding=utf-8
from selenium import webdriver
import pymysql
import unittest,time
from selenium.webdriver.common.keys import Keys
# Browser smoke test: log in to the LexianManager console, open the menu
# entries under the 8th left-hand section, and switch into the 'manager'
# iframe for subsequent interactions.
print("test36")
wf = webdriver.Firefox()
# NOTE(review): mark_01 and n are never used below — presumably leftovers
# from a longer script; confirm before removing.
mark_01=0
n=0
wf.get("http://192.168.17.66:8080/LexianManager/html/login.html")
wf.find_element_by_xpath(".//*[@id='login']").click()
# Fixed sleeps as crude waits for page transitions (WebDriverWait would
# be more robust).
time.sleep(1)
wf.find_element_by_xpath(".//*[@id='leftMenus']/div[8]/div[1]/div[2]/a[2]").click()
time.sleep(1)
wf.find_element_by_xpath(".//*[@id='leftMenus']/div[8]/div[2]/ul/li[2]/a").click()
time.sleep(1)
wf.switch_to_frame("manager")
| Singularmotor/auto_test_lexian | auto_test_lexian/test_case/delaytest_38.py | Python | mit | 544 | 0.012868 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for Android contacts2.db database events."""
from plaso.lib import eventdata
class AndroidCallFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for Android call history events."""

  # Event data type this formatter is registered for.
  DATA_TYPE = 'android:event:call'

  # Message pieces joined in order; per ConditionalEventFormatter a piece
  # is presumably skipped when its attribute is absent — confirm against
  # the base class.
  FORMAT_STRING_PIECES = [
      u'{call_type}',
      u'Number: {number}',
      u'Name: {name}',
      u'Duration: {duration} seconds']

  FORMAT_STRING_SHORT_PIECES = [u'{call_type} Call']

  # Source strings shown in timeline output columns.
  SOURCE_LONG = 'Android Call History'
  SOURCE_SHORT = 'LOG'
| iwm911/plaso | plaso/formatters/android_calls.py | Python | apache-2.0 | 1,199 | 0.005004 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python demo tool for BertNLClassifier."""
import inspect
import os.path as _os_path
import subprocess
import sys
from absl import app
from absl import flags
FLAGS = flags.FLAGS

# Command-line flags for the demo wrapper.
flags.DEFINE_string('model_path', None, 'Model Path')
flags.DEFINE_string('text', None, 'Text to Predict')

# Required flag.
flags.mark_flag_as_required('model_path')
flags.mark_flag_as_required('text')

# Path to the compiled native demo binary, resolved relative to this file.
_BERT_NL_CLASSIFIER_NATIVE_PATH = _os_path.join(
    _os_path.dirname(inspect.getfile(inspect.currentframe())),
    '../bert_nl_classifier_demo')
def classify(model_path, text):
  """Classifies input text into different categories.

  Args:
    model_path: path to model
    text: input text
  """
  # Run the detection tool. Pass an argv list with shell=False so that
  # `text` and `model_path` reach the binary verbatim: the previous
  # shell-string version broke on (and allowed shell injection via)
  # quotes, spaces, $() etc. embedded in the input text.
  subprocess.run(
      [
          _BERT_NL_CLASSIFIER_NATIVE_PATH,
          '--model_path=' + model_path,
          '--text=' + text,
      ],
      check=True)
def run_main(argv):
  """absl entry point: ignores positional argv and runs classification."""
  del argv  # Unused.
  classify(FLAGS.model_path, FLAGS.text)
# Simple wrapper to make the code pip-friendly
def main():
  """Console-script entry point; delegates to absl's app.run."""
  app.run(main=run_main, argv=sys.argv)


if __name__ == '__main__':
  main()
| chromium/chromium | third_party/tflite_support/src/tensorflow_lite_support/examples/task/text/desktop/python/bert_nl_classifier_demo.py | Python | bsd-3-clause | 1,817 | 0.003853 |
# [1] https://doi.org/10.1016/0009-2614(91)90115-P
# Helgaker, 1991
import numpy as np
from scipy.optimize import newton
from pysisyphus.tsoptimizers.TSHessianOptimizer import TSHessianOptimizer
class TRIM(TSHessianOptimizer):
    """Trust-Region Image Minimization transition-state optimizer.

    Follows the selected Hessian eigenmode uphill by inverting its sign in
    an "image function", then takes a levelshifted Newton step whose length
    is capped at the trust radius (Helgaker 1991, ref. [1] above).
    """

    def optimize(self):
        """Compute and return one optimization step."""
        # housekeeping() supplies the current energy/gradient/Hessian plus
        # its eigendecomposition; `resetted` flags a Hessian reset.
        energy, gradient, H, eigvals, eigvecs, resetted = self.housekeeping()
        self.update_ts_mode(eigvals, eigvecs)

        self.log(f"Signs of eigenvalue and -vector of root {self.root} "
                  "will be reversed!")

        # Transform gradient to basis of eigenvectors
        gradient_ = eigvecs.T.dot(gradient)

        # Construct image function by inverting the signs of the eigenvalue and
        # -vector of the mode to follow uphill.
        eigvals_ = eigvals.copy()
        eigvals_[self.root] *= -1
        gradient_ = gradient_.copy()
        gradient_[self.root] *= -1

        def get_step(mu):
            # Levelshifted Newton step in the eigenbasis, mapped back to
            # the original coordinates.
            zetas = -gradient_ / (eigvals_ - mu)
            # Replace nan with 0.
            zetas = np.nan_to_num(zetas)
            # Transform to original basis
            step = eigvecs * zetas
            step = step.sum(axis=1)
            return step

        def get_step_norm(mu):
            return np.linalg.norm(get_step(mu))

        def func(mu):
            # Root of this function: step length equals the trust radius.
            return get_step_norm(mu) - self.trust_radius

        mu = 0
        norm0 = get_step_norm(mu)
        if norm0 > self.trust_radius:
            # Pure Newton step is too long; solve for the levelshift mu
            # that shrinks the step onto the trust-region boundary.
            mu, res = newton(func, x0=mu, full_output=True)
            assert res.converged
            self.log(f"Using levelshift of μ={mu:.4f}")
        else:
            self.log("Took pure newton step without levelshift")

        step = get_step(mu)
        step_norm = np.linalg.norm(step)
        self.log(f"norm(step)={step_norm:.6f}")

        self.predicted_energy_changes.append(self.quadratic_model(gradient, self.H, step))

        return step
| eljost/pysisyphus | pysisyphus/tsoptimizers/TRIM.py | Python | gpl-3.0 | 1,861 | 0.001075 |
class Node:
    """A single element of a singly linked list."""

    def __init__(self, val):
        # Payload plus pointer to the successor (None marks the tail).
        self.value = val
        self.nextNode = None

    def getValue(self):
        """Return the payload stored in this node."""
        return self.value

    def getNextNode(self):
        """Return the successor node, or None at the tail."""
        return self.nextNode

    def setValue(self, val):
        """Replace this node's payload."""
        self.value = val

    def setNextNode(self, nxtNode):
        """Point this node at a new successor."""
        self.nextNode = nxtNode
self.nextNode = nxtNode
"""
Linked List (LL)
Basic operations supported by this list:
1. Add − Adds an element at the beginning of the list.
2. Deletion − Deletes the first element matching a given key.
3. Display − Displays the complete list.
4. Size − Returns the number of elements in the list.
"""
class LinkedList:
    """Singly linked list with add, size, printList and deleteNode."""

    def __init__(self):
        # Head node; None while the list is empty.
        self.head = None

    # Returns 'True' or 'False' depending on whether the list is empty
    def isEmpty(self):
        return self.head is None

    # Add a node to the head of the LL (O(1))
    def add(self, value):
        node = Node(value)
        node.setNextNode(self.head)
        self.head = node

    # gives the total number of elements (O(n))
    def size(self):
        count = 0
        node = self.head
        while node is not None:
            count += 1
            node = node.getNextNode()
        return count

    # prints the elements in the list, head first, one per line
    def printList(self):
        node = self.head
        while node is not None:
            print(node.getValue())
            node = node.getNextNode()

    def deleteNode(self, key):
        """Remove the first node whose value equals key, if any.

        Fixes three bugs in the previous version: the head node could
        never be deleted, the traversal aborted after the first match
        test because Node.setNextNode() returns None, and the loop spun
        forever when the key was absent (tail node never advanced).
        """
        if self.head is not None and self.head.getValue() == key:
            self.head = self.head.getNextNode()
            return
        node = self.head
        while node is not None:
            nxt = node.getNextNode()
            if nxt is not None and nxt.getValue() == key:
                node.setNextNode(nxt.getNextNode())
                return
            node = nxt
if __name__ == "__main__":
    # Create a new linked list
    myList = LinkedList()

    # Add elements to the list (each add pushes at the head, so the list
    # reads back in reverse insertion order: 7, 3, 6, 5, 4, 3, 2, 1)
    myList.add(1)
    myList.add(2)
    myList.add(3)
    myList.add(4)
    myList.add(5)
    myList.add(6)
    myList.add(3)
    myList.add(7)

    # Perform operations on the list
    print ("List Size : " + str(myList.size()))
    myList.printList()
    print ("---------------------")
    # Delete the first node with value 3, then show the shortened list
    myList.deleteNode(3)
    print ("List Size : " + str(myList.size()))
    myList.printList()
| njanirudh/Python-DataStructures | linked_list.py | Python | mit | 2,365 | 0.009334 |
from flask.ext.flails import FlailsView
from flask import render_template, redirect, url_for, request
#from config import db
import models
import forms
class PrivatePostView(FlailsView):
    """CRUD views for posts and their comments (private blueprint).

    NOTE(review): the `db` import at the top of this file is commented
    out, yet private_post_delete and the comment methods still reference
    or comment-out `db.session` calls — private_post_delete will raise
    NameError if hit. Confirm whether persistence is intentionally
    disabled here.
    """

    def private_post_index(self):
        # List every post.
        object_list = models.Post.query.all()
        return render_template('post/index.slim', object_list=object_list)

    def private_post_show(self, ident):
        # Show one post together with an empty comment form.
        post = models.Post.query.get(ident)
        form = forms.CommentForm()
        return render_template('post/show.slim', post=post, form=form)

    def private_post_new(self):
        # Create a post from the submitted form; re-render the form on
        # validation failure.
        form = forms.PostForm()
        if form.validate_on_submit():
            post = models.Post(form.name.data, form.title.data, form.content.data)
            #db.session.add(post)
            #db.session.commit()
            return redirect(url_for('post.index'))
        return render_template('post/new.slim', form=form)

    def private_post_edit(self, ident):
        # Edit an existing post; the form is pre-populated from the model.
        post = models.Post.query.get(ident)
        form = forms.PostForm(request.form, post)
        if form.validate_on_submit():
            post.name = form.name.data
            post.title = form.title.data
            post.content = form.content.data
            #db.session.add(post)
            #db.session.commit()
            return redirect(url_for('post.show', ident=ident))
        return render_template('post/edit.slim', form=form, post=post)

    def private_post_delete(self, ident):
        # NOTE(review): `db` is undefined here (import commented out above).
        post = models.Post.query.get(ident)
        db.session.delete(post)
        db.session.commit()
        return redirect(url_for('post.index'))

    def private_comment_new(self, post_id):
        # Attach a new comment to the given post.
        post = models.Post.query.get(post_id)
        form = forms.CommentForm()
        if form.validate_on_submit():
            comment = models.Comment(form.commenter.data, form.body.data, post_id)
            #db.session.add(comment)
            #db.session.commit()
            return redirect(url_for('.show', ident=post_id))
        return render_template('post/show.slim', post=post, form=form)

    def private_comment_delete(self, post_id, ident):
        # Remove a comment, then return to its post's page.
        comment = models.Comment.query.get(ident)
        #db.session.delete(comment)
        #db.session.commit()
        return redirect(url_for('.show', ident=post_id))
| thrisp/flails | tests/test_app/blueprints/private/views.py | Python | mit | 2,266 | 0.005296 |
from flask import Response
from flask.views import View
from bson import json_util
from mcp import mongo
class Map(View):
    """Procurement totals per company headquarters for the map view.

    Handles GET for a municipality slug and year; returns JSON with one
    entry per company-headquarters location plus global min/max bounds
    used to scale the map markers.
    """

    def dispatch_request(self, komuna, viti):
        # First pipeline: per-headquarters sums of contract price/value and
        # contract count for the requested municipality and year.
        json = mongo.db.procurements.aggregate([
            {
                "$match": {
                    "komuna.slug": komuna,
                    "viti": viti,
                    "kompania.selia.slug": {'$ne': ''}
                }
            },
            {
                "$group": {
                    "_id": {
                        "selia": "$kompania.selia.slug",
                        "emri": "$kompania.selia.emri",
                        "gjeresi": "$kompania.selia.kordinatat.gjeresi",
                        "gjatesi": "$kompania.selia.kordinatat.gjatesi",
                    },
                    "cmimi": {
                        "$sum": "$kontrata.qmimi"
                    },
                    "vlera": {
                        "$sum": "$kontrata.vlera"
                    },
                    "numriKontratave": {
                        "$sum": 1
                    }
                }
            },
            {
                "$sort": {
                    "_id.selia": 1
                }
            },
            {
                "$project": {
                    "selia": "$_id.selia",
                    "emri": "$_id.emri",
                    "gjeresia": "$_id.gjeresi",
                    "gjatesia": "$_id.gjatesi",
                    "cmimi": "$cmimi",
                    "vlera": "$vlera",
                    "numriKontratave": "$numriKontratave",
                    "_id": 0
                }
            }
        ])

        # Second pipeline: same per-headquarters sums, then collapsed into
        # the global min/max of each measure (bounds for marker scaling).
        json_min_max = mongo.db.procurements.aggregate([
            {
                "$match": {
                    "komuna.slug": komuna,
                    "viti": viti,
                    "kompania.selia.slug": {'$ne': ''}
                }
            },
            {
                "$group": {
                    "_id": {
                        "selia": "$kompania.selia.slug",
                        "gjeresi": "$kompania.selia.kordinatat.gjeresi",
                        "gjatesi": "$kompania.selia.kordinatat.gjatesi",
                    },
                    "sumCmimi": {
                        "$sum": "$kontrata.qmimi"
                    },
                    "sumVlera": {
                        "$sum": "$kontrata.vlera"
                    },
                    "sumNumriKontratave": {
                        "$sum": 1
                    }
                }
            },
            {
                "$group": {
                    "_id": {},
                    "maxCmimi": {
                        "$max": "$sumCmimi"
                    },
                    "maxVlera": {
                        "$max": "$sumVlera"
                    },
                    "maxNumriKontratave": {
                        "$max": "$sumNumriKontratave"
                    },
                    "minCmimi": {
                        "$min": "$sumCmimi"
                    },
                    "minVlera": {
                        "$min": "$sumVlera"
                    },
                    "minNumriKontratave": {
                        "$min": "$sumNumriKontratave"
                    },
                }
            },
            {
                "$project": {
                    "_id": 0,
                    "vlera": {
                        "min": "$minVlera",
                        "max": "$maxVlera",
                    },
                    "cmimi": {
                        "min": "$minCmimi",
                        "max": "$maxCmimi",
                    },
                    "numriKontratave": {
                        "min": "$minNumriKontratave",
                        "max": "$maxNumriKontratave",
                    }
                }
            }
        ])

        # Serialize the combined response with json_util.dumps() (handles
        # BSON types); 'result' is where pymongo 2.x aggregate() puts rows.
        result_json = {};
        result_json['bounds'] = json_min_max['result'][0]
        result_json['result'] = json['result']

        resp = Response(
            response=json_util.dumps(result_json),
            mimetype='application/json')
        return resp
| opendatakosovo/municipality-procurement-api | mcp/views/map.py | Python | gpl-2.0 | 4,266 | 0.000703 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, since more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to these primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
from __future__ import absolute_import
import abc
from builtins import object
from builtins import range
from functools import total_ordering
from future.utils import with_metaclass
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from apache_beam.coders import coders
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import standard_window_fns_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
# Public API of this module; names not listed here are implementation details.
__all__ = [
    'TimestampCombiner',
    'WindowFn',
    'BoundedWindow',
    'IntervalWindow',
    'TimestampedValue',
    'GlobalWindow',
    'NonMergingWindowFn',
    'GlobalWindows',
    'FixedWindows',
    'SlidingWindows',
    'Sessions',
    ]
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class TimestampCombiner(object):
  """Determines how output timestamps of grouping operations are assigned."""
  OUTPUT_AT_EOW = beam_runner_api_pb2.OutputTime.END_OF_WINDOW
  OUTPUT_AT_EARLIEST = beam_runner_api_pb2.OutputTime.EARLIEST_IN_PANE
  OUTPUT_AT_LATEST = beam_runner_api_pb2.OutputTime.LATEST_IN_PANE
  # TODO(robertwb): Add this to the runner API or remove it.
  OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'
  @staticmethod
  def get_impl(timestamp_combiner, window_fn):
    """Returns the timeutil implementation for a TimestampCombiner value.

    Arguments:
      timestamp_combiner: One of the TimestampCombiner constants above.
      window_fn: The WindowFn in use; only needed by the
        OUTPUT_AT_EARLIEST_TRANSFORMED strategy.

    Raises:
      ValueError: if timestamp_combiner is not a known constant.
    """
    # Guard-style dispatch: the transformed variant is the only one that
    # needs the window_fn, so handle it first.
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
      return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW:
      return timeutil.OutputAtEndOfWindowImpl()
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST:
      return timeutil.OutputAtEarliestInputTimestampImpl()
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_LATEST:
      return timeutil.OutputAtLatestInputTimestampImpl()
    raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
class WindowFn(with_metaclass(abc.ABCMeta, urns.RunnerApiFn)):
  """An abstract windowing function defining a basic assign and merge."""
  class AssignContext(object):
    """Context passed to WindowFn.assign()."""
    def __init__(self, timestamp, element=None, window=None):
      # Normalize int/float seconds into a Timestamp for downstream use.
      self.timestamp = Timestamp.of(timestamp)
      self.element = element
      self.window = window
  @abc.abstractmethod
  def assign(self, assign_context):
    """Associates windows to an element.
    Arguments:
      assign_context: Instance of AssignContext.
    Returns:
      An iterable of BoundedWindow.
    """
    raise NotImplementedError
  class MergeContext(object):
    """Context passed to WindowFn.merge() to perform merging, if any."""
    def __init__(self, windows):
      self.windows = list(windows)
    def merge(self, to_be_merged, merge_result):
      # Overridden by the runner to record that the windows in
      # `to_be_merged` collapse into `merge_result`.
      raise NotImplementedError
  @abc.abstractmethod
  def merge(self, merge_context):
    """Returns a window that is the result of merging a set of windows."""
    raise NotImplementedError
  def is_merging(self):
    """Returns whether this WindowFn merges windows."""
    return True
  @abc.abstractmethod
  def get_window_coder(self):
    """Returns the coder used to encode windows produced by this WindowFn."""
    raise NotImplementedError
  def get_transformed_output_time(self, window, input_timestamp): # pylint: disable=unused-argument
    """Given input time and output window, returns output time for window.
    If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
    Windowing, the output timestamp for the given window will be the earliest
    of the timestamps returned by get_transformed_output_time() for elements
    of the window.
    Arguments:
      window: Output window of element.
      input_timestamp: Input timestamp of element as a timeutil.Timestamp
        object.
    Returns:
      Transformed timestamp.
    """
    # By default, just return the input timestamp.
    return input_timestamp
  # Register pickling as the portable (runner API) fallback representation
  # for WindowFn subclasses that define no dedicated URN.
  urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_WINDOWFN)
class BoundedWindow(object):
  """A window for timestamps in range (-infinity, end).
  Attributes:
    end: End of window.
  """
  def __init__(self, end):
    self.end = Timestamp.of(end)
  def max_timestamp(self):
    # Largest timestamp that still falls inside this window (end exclusive).
    return self.end.predecessor()
  def __eq__(self, other):
    # Subclasses must provide equality; the base class is deliberately
    # incomparable (see __hash__ below).
    raise NotImplementedError
  def __ne__(self, other):
    # Order first by endpoint, then arbitrarily
    return self.end != other.end or hash(self) != hash(other)
  def __lt__(self, other):
    # Total order: primarily by end timestamp, tie-broken (arbitrarily but
    # deterministically) by the subclass-provided hash.
    if self.end != other.end:
      return self.end < other.end
    return hash(self) < hash(other)
  def __le__(self, other):
    if self.end != other.end:
      return self.end <= other.end
    return hash(self) <= hash(other)
  def __gt__(self, other):
    if self.end != other.end:
      return self.end > other.end
    return hash(self) > hash(other)
  def __ge__(self, other):
    if self.end != other.end:
      return self.end >= other.end
    return hash(self) >= hash(other)
  def __hash__(self):
    # Subclasses must provide a hash consistent with their __eq__.
    raise NotImplementedError
  def __repr__(self):
    return '[?, %s)' % float(self.end)
@total_ordering
class IntervalWindow(windowed_value._IntervalWindowBase, BoundedWindow):
  """A window for timestamps in range [start, end).
  Attributes:
    start: Start of window as seconds since Unix epoch.
    end: End of window as seconds since Unix epoch.
  """
  def __lt__(self, other):
    # Order by end; ties broken arbitrarily but deterministically by hash.
    # total_ordering derives the remaining comparisons from this and __eq__.
    if self.end != other.end:
      return self.end < other.end
    return hash(self) < hash(other)
  def intersects(self, other):
    # NOTE(review): for non-empty windows (start < end) this disjunction is
    # always true; a strict overlap test would be
    # `other.start < self.end and self.start < other.end`. Confirm the
    # intended semantics before relying on or changing this.
    return other.start < self.end or self.start < other.end
  def union(self, other):
    # Smallest single interval covering both windows.
    return IntervalWindow(
        min(self.start, other.start), max(self.end, other.end))
@total_ordering
class TimestampedValue(object):
  """A value paired with the timestamp at which it occurred.

  Attributes:
    value: The underlying value.
    timestamp: Timestamp associated with the value as seconds since Unix epoch.
  """
  def __init__(self, value, timestamp):
    self.value = value
    self.timestamp = Timestamp.of(timestamp)
  def __eq__(self, other):
    # Equal only to same-typed objects with matching value and timestamp.
    if type(self) != type(other):
      return False
    return self.value == other.value and self.timestamp == other.timestamp
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    return hash((self.value, self.timestamp))
  def __lt__(self, other):
    # Heterogeneous comparisons fall back to ordering by type name so that
    # mixed sorts remain deterministic.
    if type(self) != type(other):
      return type(self).__name__ < type(other).__name__
    # Same type: lexicographic on (value, timestamp); total_ordering
    # derives the remaining comparison operators.
    return (self.value, self.timestamp) < (other.value, other.timestamp)
class GlobalWindow(BoundedWindow):
  """The default window into which all data is placed (via GlobalWindows)."""
  _instance = None  # Singleton instance, created lazily in __new__.
  def __new__(cls):
    # GlobalWindow is a singleton: every construction yields the same object.
    if cls._instance is None:
      cls._instance = super(GlobalWindow, cls).__new__(cls)
    return cls._instance
  def __init__(self):
    # The window's end comes from the portable runner-API max-timestamp
    # constant; the window spans [MIN_TIMESTAMP, end).
    super(GlobalWindow, self).__init__(GlobalWindow._getTimestampFromProto())
    self.start = MIN_TIMESTAMP
  def __repr__(self):
    return 'GlobalWindow'
  def __hash__(self):
    return hash(type(self))
  def __eq__(self, other):
    # Global windows are always and only equal to each other.
    return self is other or type(self) is type(other)
  def __ne__(self, other):
    return not self == other
  @staticmethod
  def _getTimestampFromProto():
    # Convert the runner-API constant (milliseconds) into a Timestamp
    # (microsecond granularity).
    ts_millis = int(
        common_urns.constants.GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS.constant)
    return Timestamp(micros=ts_millis*1000)
class NonMergingWindowFn(WindowFn):
  """Base class for WindowFns whose windows never need merging."""
  def is_merging(self):
    # Never merges, so runners can skip merge bookkeeping entirely.
    return False
  def merge(self, merge_context):
    """No-op: a non-merging WindowFn has nothing to merge."""
    return None
class GlobalWindows(NonMergingWindowFn):
  """A windowing function that assigns everything to one global window."""
  @classmethod
  def windowed_value(cls, value, timestamp=MIN_TIMESTAMP):
    # Convenience wrapper producing a WindowedValue in the global window.
    return WindowedValue(value, timestamp, (GlobalWindow(),))
  def assign(self, assign_context):
    # Every element lands in the single global window.
    return [GlobalWindow()]
  def get_window_coder(self):
    return coders.GlobalWindowCoder()
  def __hash__(self):
    return hash(type(self))
  def __eq__(self, other):
    # Global windowfn is always and only equal to each other.
    return self is other or type(self) is type(other)
  def __ne__(self, other):
    return not self == other
  def to_runner_api_parameter(self, context):
    # Portable representation: a well-known URN with no payload.
    return common_urns.global_windows.urn, None
  @urns.RunnerApiFn.register_urn(common_urns.global_windows.urn, None)
  def from_runner_api_parameter(unused_fn_parameter, unused_context):
    return GlobalWindows()
class FixedWindows(NonMergingWindowFn):
  """A windowing function that assigns each element to one time interval.

  The attributes size and offset determine in what time interval a timestamp
  will be slotted. The time intervals have the following formula:
  [N * size + offset, (N + 1) * size + offset)

  Attributes:
    size: Size of the window as seconds.
    offset: Offset of this window as seconds. Windows start at
      t=N * size + offset where t=0 is the UNIX epoch. The offset must be a
      value in range [0, size). If it is not it will be normalized to this
      range.
  """
  def __init__(self, size, offset=0):
    """Initialize a ``FixedWindows`` function for a given size and offset.

    Args:
      size (int): Size of the window in seconds.
      offset(int): Offset of this window as seconds. Windows start at
        t=N * size + offset where t=0 is the UNIX epoch. The offset must be a
        value in range [0, size). If it is not it will be normalized to this
        range.

    Raises:
      ValueError: if size is not strictly positive.
    """
    if size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.size = Duration.of(size)
    # Normalize the offset into [0, size).
    self.offset = Timestamp.of(offset) % self.size
  def assign(self, context):
    # Snap the element's timestamp down to the start of its window.
    timestamp = context.timestamp
    start = timestamp - (timestamp - self.offset) % self.size
    return [IntervalWindow(start, start + self.size)]
  def get_window_coder(self):
    return coders.IntervalWindowCoder()
  def __eq__(self, other):
    if type(self) == type(other) == FixedWindows:
      return self.size == other.size and self.offset == other.offset
    # Fix: previously this fell through and implicitly returned None.
    # NotImplemented lets Python try the reflected comparison and then
    # produce a well-defined False, per the data model.
    return NotImplemented
  def __hash__(self):
    return hash((self.size, self.offset))
  def __ne__(self, other):
    return not self == other
  def to_runner_api_parameter(self, context):
    # Portable representation: well-known URN plus a proto payload carrying
    # size and offset at microsecond precision.
    return (common_urns.fixed_windows.urn,
            standard_window_fns_pb2.FixedWindowsPayload(
                size=proto_utils.from_micros(
                    duration_pb2.Duration, self.size.micros),
                offset=proto_utils.from_micros(
                    timestamp_pb2.Timestamp, self.offset.micros)))
  @urns.RunnerApiFn.register_urn(
      common_urns.fixed_windows.urn,
      standard_window_fns_pb2.FixedWindowsPayload)
  def from_runner_api_parameter(fn_parameter, unused_context):
    return FixedWindows(
        size=Duration(micros=fn_parameter.size.ToMicroseconds()),
        offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()))
class SlidingWindows(NonMergingWindowFn):
  """A windowing function that assigns each element to a set of sliding windows.

  The attributes size and offset determine in what time interval a timestamp
  will be slotted. The time intervals have the following formula:
  [N * period + offset, N * period + offset + size)

  Attributes:
    size: Size of the window as seconds.
    period: Period of the windows as seconds.
    offset: Offset of this window as seconds since Unix epoch. Windows start at
      t=N * period + offset where t=0 is the epoch. The offset must be a value
      in range [0, period). If it is not it will be normalized to this range.
  """
  def __init__(self, size, period, offset=0):
    """Initialize sliding windows of the given size, period and offset.

    Raises:
      ValueError: if size is not strictly positive.
    """
    if size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.size = Duration.of(size)
    self.period = Duration.of(period)
    # Normalize the offset into [0, period).
    self.offset = Timestamp.of(offset) % period
  def assign(self, context):
    timestamp = context.timestamp
    # Start of the most recent window containing this timestamp.
    start = timestamp - ((timestamp - self.offset) % self.period)
    # Step backwards one period at a time, emitting every window that still
    # contains the timestamp (range stops before timestamp - size).
    return [
        IntervalWindow(Timestamp(micros=s), Timestamp(micros=s) + self.size)
        for s in range(start.micros, timestamp.micros - self.size.micros,
                       -self.period.micros)]
  def get_window_coder(self):
    return coders.IntervalWindowCoder()
  def __eq__(self, other):
    if type(self) == type(other) == SlidingWindows:
      return (self.size == other.size
              and self.offset == other.offset
              and self.period == other.period)
    # Fix: previously this fell through and implicitly returned None.
    # NotImplemented defers to the reflected comparison, then False.
    return NotImplemented
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    # `size` is omitted from the hash; equal objects (which must share
    # offset and period) still hash equal, so this remains consistent with
    # __eq__ -- unequal sizes merely cause harmless collisions.
    return hash((self.offset, self.period))
  def to_runner_api_parameter(self, context):
    # Portable representation with microsecond-precision proto payload.
    return (common_urns.sliding_windows.urn,
            standard_window_fns_pb2.SlidingWindowsPayload(
                size=proto_utils.from_micros(
                    duration_pb2.Duration, self.size.micros),
                offset=proto_utils.from_micros(
                    timestamp_pb2.Timestamp, self.offset.micros),
                period=proto_utils.from_micros(
                    duration_pb2.Duration, self.period.micros)))
  @urns.RunnerApiFn.register_urn(
      common_urns.sliding_windows.urn,
      standard_window_fns_pb2.SlidingWindowsPayload)
  def from_runner_api_parameter(fn_parameter, unused_context):
    return SlidingWindows(
        size=Duration(micros=fn_parameter.size.ToMicroseconds()),
        offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()),
        period=Duration(micros=fn_parameter.period.ToMicroseconds()))
class Sessions(WindowFn):
  """A windowing function that groups elements into sessions.

  A session is defined as a series of consecutive events
  separated by a specified gap size.

  Attributes:
    gap_size: Size of the gap between windows as floating-point seconds.
  """
  def __init__(self, gap_size):
    """Initialize Sessions with the given inactivity gap.

    Raises:
      ValueError: if gap_size is not strictly positive.
    """
    if gap_size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.gap_size = Duration.of(gap_size)
  def assign(self, context):
    # Each element initially gets its own proto-session window; overlapping
    # windows are later combined by merge().
    timestamp = context.timestamp
    return [IntervalWindow(timestamp, timestamp + self.gap_size)]
  def get_window_coder(self):
    return coders.IntervalWindowCoder()
  def merge(self, merge_context):
    """Merge overlapping windows via a sweep over windows sorted by start."""
    to_merge = []
    end = MIN_TIMESTAMP
    for w in sorted(merge_context.windows, key=lambda w: w.start):
      if to_merge:
        if end > w.start:
          # Overlaps the current run: absorb it and extend the run's end.
          to_merge.append(w)
          if w.end > end:
            end = w.end
        else:
          # Gap found: flush the current run if it merged anything, then
          # start a new run from this window.
          if len(to_merge) > 1:
            merge_context.merge(to_merge,
                                IntervalWindow(to_merge[0].start, end))
          to_merge = [w]
          end = w.end
      else:
        to_merge = [w]
        end = w.end
    # Flush the final run.
    if len(to_merge) > 1:
      merge_context.merge(to_merge, IntervalWindow(to_merge[0].start, end))
  def __eq__(self, other):
    if type(self) == type(other) == Sessions:
      return self.gap_size == other.gap_size
    # Fix: previously this fell through and implicitly returned None.
    # NotImplemented defers to the reflected comparison, then False.
    return NotImplemented
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    return hash(self.gap_size)
  def to_runner_api_parameter(self, context):
    # Portable representation with microsecond-precision proto payload.
    return (common_urns.session_windows.urn,
            standard_window_fns_pb2.SessionsPayload(
                gap_size=proto_utils.from_micros(
                    duration_pb2.Duration, self.gap_size.micros)))
  @urns.RunnerApiFn.register_urn(
      common_urns.session_windows.urn,
      standard_window_fns_pb2.SessionsPayload)
  def from_runner_api_parameter(fn_parameter, unused_context):
    return Sessions(
        gap_size=Duration(micros=fn_parameter.gap_size.ToMicroseconds()))
| markflyhigh/incubator-beam | sdks/python/apache_beam/transforms/window.py | Python | apache-2.0 | 18,159 | 0.007324 |
#!/usr/bin/env python
# import all needed libraries
import sys
import time
import sockjs.tornado
from tornado import web, ioloop
from sockjs.tornado import SockJSRouter, SockJSConnection
import pigpio
import subprocess
import os
import signal
from thread import start_new_thread
import random
import colorsys
import pyaudio
from scipy.signal import butter, lfilter, freqz
import numpy as np
# Initial setup of GPIO pins: connect to the local pigpio daemon.
pi = pigpio.pi()
############################### setting basic options ###############################
# Master brightness (0-255); each channel is scaled by bright/255 in scale().
bright = 255
# The Pins. Use Broadcom numbers.
RED_PIN = 17
GREEN_PIN = 22
BLUE_PIN = 24
# TCP port the SockJS endpoint listens on.
port = 1713
# Global variables for Music
CHUNK = 512 # How many bytes of audio to read at a time
# NOTE(review): `global` at module level is a no-op; HUE is just a module
# global, mutated by the music-visualizer thread.
global HUE
HUE = 0
############################### setting effect options ##############################
# Current effect mode; effect worker threads poll this and exit when it changes.
mode = "nothing"
############################### basic functions ###############################
class LedController:
    """Colour-conversion helpers and PWM output for the RGB LED strip."""

    def hex_to_rgb(self, hex):
        """Convert '#rrggbb' (or 'rrggbb') into an (r, g, b) tuple of ints."""
        hex = hex.lstrip('#')
        lv = len(hex)
        rgb = tuple(int(hex[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
        return rgb

    def rgb_to_hex(self, rgb):
        """Convert an (r, g, b) triple into a '#rrggbb' string.

        Fix: the previous implementation shadowed the builtin hex() with a
        local string named `hex` and never accumulated the result, so it
        raised TypeError ('str' object is not callable). Channels are also
        zero-padded now so every colour is exactly six hex digits.
        """
        return '#' + ''.join('%02x' % int(round(c)) for c in rgb)

    def hsl_to_rgb(self, hsl):
        """Convert [h, s, l] to an (r, g, b) tuple scaled to 0-255.

        Accepts either 0..1 floats or CSS-style values (h in degrees,
        s/l in percent); the latter are normalized in place.
        """
        if any(i > 1 for i in hsl):
            hsl[0] /= 360
            hsl[1] /= 100
            hsl[2] /= 100
        # colorsys uses HLS ordering, hence the swapped s/l arguments.
        colour_tuple = tuple(i * 255 for i in colorsys.hls_to_rgb(hsl[0], hsl[2], hsl[1]))
        return colour_tuple

    def checkRGB(self, color):
        """Clamp a channel value into the valid 0..255 range."""
        if color < 0:
            color = 0
        if color > 255:
            color = 255
        return color

    def scale(self, brightness):
        """Scale a channel value by the global master brightness.

        NOTE(review): the result is clamped to 100, yet pigpio's default
        PWM duty-cycle range is 0..255 -- confirm whether set_PWM_range()
        is called elsewhere; otherwise output is capped at ~40%.
        """
        realBrightness = int(int(brightness) * (float(bright) / 255.0))
        if realBrightness < 0:
            realBrightness = 0.0
        elif realBrightness > 100:
            realBrightness = 100.0
        return realBrightness

    def setLights(self, pin, brightness):
        """Write one channel's scaled brightness to its GPIO pin."""
        realBrightness = self.scale(brightness)
        pi.set_PWM_dutycycle(pin, realBrightness)

    def setRGB(self, r, g, b):
        """Clamp and write all three channels to the strip."""
        r = self.checkRGB(r)
        g = self.checkRGB(g)
        b = self.checkRGB(b)
        self.setLights(RED_PIN, r)
        self.setLights(GREEN_PIN, g)
        self.setLights(BLUE_PIN, b)
        # Parenthesized single-argument print works identically on py2/py3.
        print("changing color to rgb(" + str(r) + "," + str(g) + "," + str(b) + ")")
        #BrokerConnection.color_broadcaster(r,g,b)

    def noWhite(self, r, g, b):
        """Avoid washed-out colours: cap lightness and boost saturation."""
        r /= 255.0
        g /= 255.0
        b /= 255.0
        HSL = colorsys.rgb_to_hls(r, g, b)
        h = HSL[0]
        l = HSL[1]
        s = HSL[2]
        if l > 0.8:
            l *= 0.8  # scale down lightness when higher than 80%
        if s < 0.4:
            s = (s * 0.6) + 0.4  # scale saturation up when lower than 40%
        return tuple(i * 255 for i in colorsys.hls_to_rgb(h, l, s))
############################### Rainbow functions ###############################
class Rainbow:
    """Continuous hue-cycling effect; runs while the global mode == "Rainbow"."""
    def updateColor(self, color, step):
        # Move one channel by `step`, clamped to the valid 0..255 range.
        color += step
        if color > 255:
            color = 255
        if color < 0:
            color = 0
        return color
    def fader(self, r, g, b):
        # Walks the edges of the RGB colour wheel, STEPS units per tick,
        # writing each intermediate colour to the strip.
        lc = LedController()
        # NOTE(review): this guard mixes mutually exclusive conjuncts (e.g.
        # `r == 255 or r == 0` AND `r == 255 and g == 255 and b == 255`), so
        # it is effectively always true. It likely intends "not already on a
        # colour-wheel corner" -- confirm before relying on it.
        if not ((r == 255 or r == 0) and (b == 255 or b == 0) and (g == 255 or g == 0) and (r == 255 and g == 255 and b == 255) and (r == 0 and g == 0 and b == 0)):
            # Drive the colour onto the wheel edge (full red, no blue) first.
            while r < 255:
                r = self.updateColor(r, STEPS)
                lc.setRGB(r, g, b)
            while b > 0:
                b = self.updateColor(b, -STEPS)
                lc.setRGB(r, g, b)
        while (mode == "Rainbow"):
            # Traverse the six edges of the RGB colour wheel in order:
            # red->yellow->green->cyan->blue->magenta->red.
            if r == 255 and b == 0 and g < 255:
                g = self.updateColor(g, STEPS)
                lc.setRGB(r, g, b)
            elif g == 255 and b == 0 and r > 0:
                r = self.updateColor(r, -STEPS)
                lc.setRGB(r, g, b)
            elif r == 0 and g == 255 and b < 255:
                b = self.updateColor(b, STEPS)
                lc.setRGB(r, g, b)
            elif r == 0 and b == 255 and g > 0:
                g = self.updateColor(g, -STEPS)
                lc.setRGB(r, g, b)
            elif g == 0 and b == 255 and r < 255:
                r = self.updateColor(r, STEPS)
                lc.setRGB(r, g, b)
            elif r == 255 and g == 0 and b > 0:
                b = self.updateColor(b, -STEPS)
                lc.setRGB(r, g, b)
############################### Music functions ###############################
class FreqAnalyser:
    """Splits audio into low/mid/high bands and maps band energy to colours."""
    # Filtering based on
    # http://wiki.scipy.org/Cookbook/ButterworthBandpass
    def __init__(self, channels, sample_rate, leds=None):
        self.leds = leds # Not needed if just plotting
        self.channels = channels
        self.sample_rate = sample_rate
        self.nyquist = float(sample_rate) / 2
        # Filter order - higher the order the sharper
        # the curve
        order = 3
        # Cut off frequencies:
        # Low pass filter
        cutoff = 200 / self.nyquist
        # Numerator (b) and denominator (a)
        # polynomials of the filter.
        b, a = butter(order, cutoff, btype='lowpass')
        self.low_b = b
        self.low_a = a
        # High pass filter
        cutoff = 4000 / self.nyquist
        b, a = butter(order, cutoff, btype='highpass')
        self.high_b = b
        self.high_a = a
        # Keep track of max brightness for each
        # colour
        self.max = [0.0, 0.0, 0.0]
        # Make different frequencies fall faster
        # bass needs to be punchy.
        self.fall = [15.0, 2.5, 5.0]
    def filter(self, data):
        """Split one chunk of samples into low/mid/high band signals."""
        # Apply low filter
        self.low_data = lfilter(self.low_b,
                                self.low_a,
                                data)
        # Apply high filter
        self.high_data = lfilter(self.high_b,
                                 self.high_a,
                                 data)
        # Get mid data by doing signal - (low + high)
        self.mid_data = np.subtract(data,
                                    np.add(self.low_data,
                                           self.high_data))
    @staticmethod
    def rms(data):
        # Return root mean square of data set
        # (i.e. average amplitude)
        return np.sqrt(np.mean(np.square(data)))
    def change_leds(self):
        """Map the current band energies to an RGB colour and write it out."""
        lc = LedController()
        # Get average amplitude
        l = []
        l.append(self.rms(self.low_data))
        l.append(self.rms(self.mid_data))
        l.append(self.rms(self.high_data))
        # NOTE(review): if the global mode changes to something other than
        # "Music"/"Music1" between loop() iterations, RGB below is unbound
        # and setRGB raises NameError -- confirm mode cannot flip mid-call.
        if mode == "Music":
            # Hue-rotation mode: bass energy drives lightness; hue either
            # auto-advances (scaled by overall energy) or is fixed from a
            # client-supplied hex colour.
            HUEcolor = MusicColor
            swift = (sum(l) * random.uniform(0, 7))
            if swift < 0.5:
                swift = 0.5
            if (HUEcolor == "Auto"):
                global HUE
                HUE += swift
            else:
                rgb = lc.hex_to_rgb(HUEcolor)
                rgb = [float(rgb[i]) / 255.0 for i in range(3)]
                global HUE
                HUE = colorsys.rgb_to_hls(rgb[0], rgb[1], rgb[2])[0] * 360
            light = 0.01 + l[0]
            if HUE > 360:
                HUE = 0 + (HUE - 360)
            if light > 0.6:
                light = 0.6
            RGB = lc.hsl_to_rgb([HUE / 360, 1, light])
        elif mode == "Music1":
            # Equalizer mode: each band maps directly to one channel,
            # weighted by the client-supplied equalizer settings.
            equalizer = MusicColor
            for i in range(3):
                # Do any number fudging to make it look better
                # here - probably want to avoid high values of
                # all because it will be white
                # (Emphasise/Reduce bass, mids, treble)
                l[i] *= float(equalizer[i])
                l[i] = (l[i] * 256) - 1
                # Use new val if > previous max
                if l[i] > self.max[i]:
                    self.max[i] = l[i]
                else:
                    # Otherwise, decrement max and use that
                    # Gives colour falling effect
                    self.max[i] -= self.fall[i]
                    if self.max[i] < 0:
                        self.max[i] = 0
                    l[i] = self.max[i]
            RGB = l
        lc.setRGB(RGB[0], RGB[1], RGB[2])
class AudioController:
    """Captures line-in audio with PyAudio and feeds chunks to FreqAnalyser."""
    def __init__(self, leds):
        self.line_in = True
        self.leds = leds
        # NOTE(review): PyAudio is instantiated here, so audio resources are
        # claimed as soon as this object is created (which happens at import
        # time via the class-level attribute on BrokerConnection).
        self.p = pyaudio.PyAudio()
    def more(self):
        # Read the next CHUNK frames from the input stream.
        try:
            # Return line in data
            return self.stream.read(CHUNK)
        except:
            # NOTE(review): bare except hides real errors (including stream
            # overflows); returns dummy bytes so the loop keeps running.
            print "line-in error"
            return 'ab'
    def analyse(self, data):
        # Convert to numpy array and filter
        data = np.fromstring(data, dtype=np.int16)
        # Convert int16 to float for dsp
        data = np.float32(data/32768.0)
        # Send to filter
        self.analyser.filter(data)
        self.analyser.change_leds()
    def record_setup(self):
        # Open the default input device: mono, 44.1 kHz, 16-bit.
        self.channels = 1
        self.sample_rate = 44100
        self.stream = self.p.open(format = pyaudio.paInt16,
                        channels = self.channels,
                        rate = self.sample_rate,
                        input=True,
                        frames_per_buffer=CHUNK)
    def loop(self):
        # Main processing loop
        # Do appropriate setup
        self.record_setup()
        self.analyser = FreqAnalyser(self.channels,
                                     self.sample_rate,
                                     self.leds)
        # Read the first block of audio data
        data = self.more()
        # While there is still audio left
        while (mode == "Music") or (mode == "Music1"):
            try:
                # Analyse data and change LEDs
                self.analyse(data)
                # Get more audio data
                data = self.more()
            except KeyboardInterrupt:
                break
        # Tidy up
        self.stream.close()
        self.p.terminate()
############################### other Effects functions ###############################
class Effects:
    """Timed colour effects driven by the module-level mode/tempo globals."""

    def Flasher(self):
        """Show a new random (non-washed-out) colour every `tempo` seconds
        for as long as the global mode stays "Flash"."""
        while mode == "Flash":
            r, g, b = LedController().noWhite(*self.Random_color())
            LedController().setRGB(r, g, b)
            time.sleep(tempo)

    def Strober(self):
        """Strobe the strip while the global mode stays "Strobe".

        A negative tempo strobes random colours; a non-negative tempo
        strobes white. The off phase uses the same delay as the on phase.
        """
        while mode == "Strobe":
            if tempo < 0:
                r, g, b = self.Random_color()
                LedController().setRGB(r, g, b)
            else:
                LedController().setRGB(255, 255, 255)
            self.wait_s(tempo)
            LedController().setRGB(0, 0, 0)
            self.wait_s(tempo)

    def Random_color(self):
        """Return a vivid random colour as an (r, g, b) tuple of 0-255 floats.

        Saturation and value are kept near their maxima so the colour never
        looks washed out; only the hue is fully random.
        """
        hue = random.uniform(0, 100) / 100
        sat = random.uniform(95, 100) / 100
        val = random.uniform(88, 100) / 100
        return tuple(channel * 255 for channel in colorsys.hsv_to_rgb(hue, sat, val))

    def wait_s(self, seconds):
        """Sleep for abs(seconds); a negative tempo only flags colour mode."""
        time.sleep(abs(seconds))
###################################### Socket ######################################
class BrokerConnection(sockjs.tornado.SockJSConnection):
    """SockJS endpoint: parses colour/effect messages and drives the LEDs."""
    # Shared across all connections. NOTE(review): these effect helpers are
    # built at class-definition (import) time; AudioController opens PyAudio
    # immediately, so audio resources are claimed on import.
    clients = set()
    lc = LedController()
    rb = Rainbow()
    ef = Effects()
    ac = AudioController(lc)
    def on_open(self, info):
        # When new client comes in, will add it to the clients list
        self.clients.add(self)
    def on_message(self, message):
        # For every incoming message, broadcast it to all clients
        #self.broadcast(self.clients, message)
        # Set RGB color
        # NOTE(review): r/g/b are never bound at this point, so these
        # locals() checks are always true; this just sets defaults.
        if not 'r' in locals():
            r = 0
        if not 'g' in locals():
            g = 0
        if not 'b' in locals():
            b = 0
        # message_analyser returns 3 items for a colour, 2 for an effect.
        aRGB = self.message_analyser(message)
        if len(aRGB) == 3:
            # Plain colour request: stop any running effect, then set it.
            r = float(aRGB[0])
            g = float(aRGB[1])
            b = float(aRGB[2])
            # NOTE(review): `mode` is assigned here although `global mode` is
            # only declared further down. Under Python 2 the declaration
            # applies to the whole function (with a SyntaxWarning); under
            # Python 3 this is a SyntaxError. The `global mode` should be
            # moved to the top of the function.
            if not mode == "nothing":
                mode = "nothing"
                # Brief pause so effect threads notice the mode change.
                time.sleep(0.1)
            self.lc.setRGB(r,g,b)
        if len(aRGB) == 2:
            # Effect request: "<EffectName>,<setting>".
            global mode
            # NOTE(review): threadRunning is computed but never used.
            if not mode == "nothing":
                threadRunning = True
            else:
                threadRunning = False
            old_mode = mode
            mode = aRGB[0]
            setting = aRGB[1]
            if (mode == 'Rainbow'):
                global STEPS
                STEPS = float(setting)
                print "starting " + mode + "-service with " + str(STEPS) + " steps"
                # Only spawn a worker if the effect was not already running.
                if not mode == old_mode:
                    start_new_thread(self.rb.fader, (r,g,b))
            elif (mode == 'Music' or mode == 'Music1'):
                global MusicColor
                # Music1 carries '#'-separated equalizer weights; Music a
                # single colour name/hex string (or "Auto").
                if mode == 'Music1':
                    MusicColor = setting.split('#')
                else:
                    MusicColor = setting
                print "starting " + mode + "-service with " + str(MusicColor) + " color settings"
                if not mode == old_mode:
                    start_new_thread(self.ac.loop, ())
            elif (mode == "Flash"):
                global tempo
                tempo = float(setting)
                print "starting " + mode + "-service with a tempo of " + str(tempo)
                if not mode == old_mode:
                    start_new_thread(self.ef.Flasher, ())
            elif (mode == "Strobe"):
                global tempo
                tempo = float(setting)
                print "starting " + mode + "-service with a tempo of " + str(tempo)
                if not mode == old_mode:
                    start_new_thread(self.ef.Strober, ())
    def message_analyser(self,msg):
        """Parse a client message into a 3-item colour or 2-item effect list."""
        RGBcolor = [0,0,0]
        if msg.startswith('#'):
            RGBcolor = self.lc.hex_to_rgb(msg)
        elif msg.startswith('rgb'):
            # Strip the "rgb(" prefix and ")" suffix.
            RGBcolor = [float(i) for i in msg[4:-1].split(',')]
        elif msg.startswith('hsl'):
            hslString = msg[4:-1].split(',')
            # Drop any '%' suffixes before converting to floats.
            for i in range(3):
                if '%' in hslString[i]:
                    pos = hslString[i].index('%')
                    hslString[i] = hslString[i][:pos]
            RGBcolor = self.lc.hsl_to_rgb([float(i) for i in hslString])
        elif msg.count(',') == 1:
            # "<Effect>,<setting>" -- the 2-item result triggers the effect
            # branch in on_message.
            RGBcolor = msg.split(',')
        elif msg.count(',') == 2 and not any(c.isalpha() for c in msg):
            # NOTE(review): this bare "r,g,b" branch slices off the first 4
            # and last characters as if the message still had an "rgb(...)"
            # wrapper; it likely should be msg.split(',') -- confirm the
            # expected client format.
            RGBcolor = [float(i) for i in msg[4:-1].split(',')]
        else:
            RGBcolor = [0,0,0]
            print "Unsupported color model"
        return RGBcolor
    def on_close(self):
        # If client disconnects, remove him from the clients list
        self.clients.remove(self)
    def color_broadcaster(self,r,g,b):
        # NOTE(review): SockJSConnection provides send()/broadcast();
        # send_message() is not part of its API, so this would raise
        # AttributeError if the (commented-out) call site in
        # LedController.setRGB were ever enabled.
        rgb = "rgb(" + str(r) + ", " + str(g) + ", " + str(b) + ")"
        self.send_message(rgb)
if __name__ == '__main__':
    # NOTE(review): `options` is never defined anywhere in this module, so
    # passing any command-line argument raises NameError here. This likely
    # intended to tweak SockJS router settings -- confirm and fix.
    if len(sys.argv) > 1:
        options['immediate_flush'] = False
    # 1. Create SockJSRouter
    BrokerRouter = sockjs.tornado.SockJSRouter(BrokerConnection, '/rgb')
    # 2. Create Tornado web.Application
    app = web.Application(BrokerRouter.urls)
    # 3. Make application listen on port
    app.listen(port)
    # 4. Every 1 second dump current client count
    # ioloop.PeriodicCallback(BrokerConnection.dump_stats, 1000).start()
    # 5. Start IOLoop
    ioloop.IOLoop.instance().start()
| karaambaa/RGB-LED-Server | server.py | Python | apache-2.0 | 15,826 | 0.006824 |
from time import time
from benchmark import Benchmark
from optimizer.optimizer import Optimizer
from optimizer.simulator import Simulator
from optimizer.evaluator import Evaluator
from extra.printer import pprint, BLUE
class EvaluatorPerf(Benchmark):
    """Base class for Evaluator benchmarks; fixes the output-file prefix."""
    def __init__(self, plant, orderList, testNumber):
        Benchmark.__init__(self, plant, orderList, testNumber)
        self.prefix = "evaluator"
class EvaluatorMachinesPerf(EvaluatorPerf):
    """Times Evaluator.evaluate() as the number of machines grows."""
    def __init__(self, plant, orderList, testNumber):
        EvaluatorPerf.__init__(self, plant, orderList, testNumber)
        self.testName = "NumberOfMachines"
        self.startValue = 1
    def bench(self):
        # Keep full recipes and the machine list aside, then rebuild the
        # plant with an increasing machine count per iteration.
        recipes = []
        for o in self.orderList.orders:
            recipes.append(o.recipe.recipe[:])
            o.recipe.recipe = []
        machines = self.plant.machines[:]
        self.plant.machines = []
        i = self.startValue
        while i <= len(machines):
            pprint("PERF Number of machines = " + str(i), BLUE)
            # Use the first i machines and a matching recipe prefix per order.
            self.plant.machines = machines[:i]
            for j, o in enumerate(self.orderList.orders):
                o.recipe.recipe = recipes[j][:i]
            optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
                Evaluator(self.plant))
            # Minimal GA settings: we only need some schedule to time.
            optimizer.populationSize = 2
            optimizer.iterations = 2
            optimizer.indivMutationRate = 0.5
            optimizer.selectionRate = 0.5
            optimizer.mutationRange = 10
            schedules = optimizer.run()
            evaluator = Evaluator(self.plant)
            # Time a single evaluation of the first returned schedule.
            t = time()
            evaluator.evaluate(schedules[0])
            t = time() - t
            self.addCairoPlotTime(t)
            self.addGnuPlotTime(i, t)
            i += 1
class EvaluatorOrdersPerf(EvaluatorPerf):
    """Times Evaluator.evaluate() as the number of orders grows."""
    def __init__(self, plant, orderList, testNumber):
        EvaluatorPerf.__init__(self, plant, orderList, testNumber)
        self.testName = "NumberOfOrders"
        self.startValue = 2
    def bench(self):
        # Keep the full order list aside and benchmark growing prefixes.
        orders = self.orderList.orders[:]
        self.orderList.orders = []
        i = self.startValue
        while i <= len(orders):
            pprint("PERF Number of orders = " + str(i), BLUE)
            self.orderList.orders = orders[:i]
            optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
                Evaluator(self.plant))
            # Minimal GA settings: we only need some schedule to time.
            optimizer.populationSize = 2
            optimizer.iterations = 2
            optimizer.indivMutationRate = 0.5
            optimizer.selectionRate = 0.5
            optimizer.mutationRange = 10
            schedules = optimizer.run()
            evaluator = Evaluator(self.plant)
            # Time a single evaluation of the first returned schedule.
            t = time()
            evaluator.evaluate(schedules[0])
            t = time() - t
            self.addCairoPlotTime(t)
            self.addGnuPlotTime(i, t)
            i += 1
class EvaluatorLargeValuesPerf(EvaluatorPerf):
    """Times Evaluator.evaluate() as deadlines/durations are scaled up."""
    def __init__(self, plant, orderList, testNumber):
        EvaluatorPerf.__init__(self, plant, orderList, testNumber)
        self.testName = "LargeValuesMultiplier"
        # NOTE(review): self.startValue is not set here -- presumably
        # inherited from Benchmark; confirm it is initialized before bench().
    def bench(self):
        val = 2
        i = self.startValue
        while i < 10:
            pprint("PERF Large Value = " + str(i * val), BLUE)
            # NOTE(review): deadlines and durations are multiplied
            # *cumulatively* each iteration (actual factor is val**iteration),
            # while the gnuplot label below uses (i + 1) * val -- confirm
            # which scaling is intended.
            for o in self.orderList.orders:
                o.deadline *= val
                for r in o.recipe.recipe:
                    r[1] *= val
            optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
                Evaluator(self.plant))
            # Minimal GA settings; wider mutationRange for the larger values.
            optimizer.populationSize = 2
            optimizer.iterations = 2
            optimizer.indivMutationRate = 0.5
            optimizer.selectionRate = 0.5
            optimizer.mutationRange = 500
            schedules = optimizer.run()
            evaluator = Evaluator(self.plant)
            # Time a single evaluation of the first returned schedule.
            t = time()
            evaluator.evaluate(schedules[0])
            t = time() - t
            self.addCairoPlotTime(t)
            self.addGnuPlotTime((i + 1) * val, t)
            i += 1
| fredmorcos/attic | projects/plantmaker/plantmaker-main/src/benchmark/evaluatorperf.py | Python | isc | 3,570 | 0.020728 |
# -*- coding: utf-8 -*-
import os

from setuptools import setup, find_packages


# Release version of the distribution.
PACKAGE_VERSION = "1.1.0"

# Package name patterns that must never end up in a built distribution.
PACKAGES_TO_SKIP = ['test_*', ]

# Runtime dependencies.
RUNTIME_DEPS = [
    'requests',
    'Flask',
]

# Dependencies needed only to run the test suite.
TEST_DEPS = [
    'nose',
    'httpretty',
]

setup(
    name='Flask-HTTP-Forwarding',
    version=PACKAGE_VERSION,
    url='http://www.github.com/casetext/flask-http-forwarding',
    author='Casetext, Inc.',
    author_email='casetext@casetext.com',
    description='Flask extension implementing HTTP forwarding',
    license='MIT',
    packages=find_packages(exclude=PACKAGES_TO_SKIP),
    include_package_data=True,
    install_requires=RUNTIME_DEPS,
    tests_require=TEST_DEPS,
    test_suite="nose.collector",
    platforms='any',
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| casetext/flask-http-forwarding | setup.py | Python | mit | 1,109 | 0.000902 |
# Copyright 2014 Ahmet Cetinkaya
# This file is part of pastefromhtml.
# pastefromhtml is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pastefromhtml is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pastefromhtml. If not, see <http://www.gnu.org/licenses/>.
from html.parser import HTMLParser
from html.entities import name2codepoint
import re
import sys
import urllib.request, urllib.error, urllib.parse
import os
def assoc(key, pairs):
    """Return the value paired with *key* in an association list.

    *pairs* is an iterable of ``(key, value)`` tuples; the first match wins,
    mirroring Lisp-style ``assoc``.  Returns ``None`` when *key* is absent
    (callers in this module rely on ``None`` as the "missing" sentinel).
    """
    # next() with a default replaces the original manual loop-and-flag scan.
    return next((v for k, v in pairs if k == key), None)
def get_url(url, name):
    """Download *url* and write the raw response body to the file *name*.

    A ``Mozilla`` User-Agent header is sent because some hosts reject
    Python's default agent string.
    """
    request = urllib.request.Request(url)
    request.add_unredirected_header('User-agent', 'Mozilla')
    # Context managers guarantee both the HTTP response and the destination
    # file are closed even if the download raises; the original leaked the
    # file handle on error and never closed the response.  Opening the file
    # only after the request succeeds also avoids leaving an empty file
    # behind on failure.
    with urllib.request.urlopen(request) as response:
        with open(name, 'wb') as out:
            out.write(response.read())
# HTML Clipboard Data Parser
class HTMLCDParser(HTMLParser):
    """Translate HTML clipboard data into Zim wiki markup.

    Subclass of :class:`html.parser.HTMLParser`: the ``handle_*`` callbacks
    below append the equivalent Zim syntax to ``self.zim_str`` as the HTML
    is fed.  Use :meth:`to_zim` rather than calling ``feed()`` directly.
    The many ``inside_*`` flags track which (non-nestable) construct is
    currently open, because most translation rules depend on the enclosing
    tag.
    """

    def __init__(self):
        """Initialise parser state and the tag -> Zim markup tables."""
        HTMLParser.__init__(self)  # super().__init__() for Python 3
        # Accumulated Zim markup output.
        self.zim_str = ""
        # Opening Zim markup emitted when each HTML tag starts.
        self.beg = {"h1": "====== ",
                    "h2": "===== ",
                    "h3": "==== ",
                    "h4": "=== ",
                    "h5": "== ",
                    "iframe": "[[",
                    "strong": "**",
                    "b": "**",
                    "i": "//",
                    "em": "//",
                    "u": "__",
                    "ins": "__",
                    "mark": "__",
                    "pre": "''",
                    "code": "''",
                    "blockquote": "",
                    "strike": "~~",
                    "del": "~~",
                    "p": "",
                    "div": "",
                    "ol": "",
                    "ul": "",
                    "dl": "",
                    "dt": "",
                    "dd": "\t",
                    "li": "",
                    "table": "",
                    "caption": "",
                    "tr": "",
                    "th": "|",
                    "td": "|",
                    "hr": "-----\n",
                    "sup": "^{",
                    "sub": "_{",
                    "span": "",
                    "figure": "",
                    "figcaption": "\n",
                    "abbr": "",
                    "q": "",
                    "time": ""}
        # Closing Zim markup emitted when each HTML tag ends.
        self.end = {"h1": " ======\n",
                    "h2": " =====\n",
                    "h3": " ====\n",
                    "h4": " ===\n",
                    "h5": " ==\n",
                    "iframe": "]]",
                    "strong": "**",
                    "b": "**",
                    "i": "//",
                    "em": "//",
                    "u": "__",
                    "ins": "__",
                    "mark": "__",
                    "pre": "''",
                    "code": "''",
                    "blockquote": "",
                    "strike": "~~",
                    "del": "~~",
                    "p": "\n",
                    "div": "\n",
                    "a": "]]",
                    "ol": "\n",
                    "ul": "\n",
                    "dl": "\n",
                    "dt": ":\n",
                    "dd": "\n",
                    "li": "",
                    "table": "\n",
                    "caption": "\n",
                    "tr": "|\n",
                    "th": "",
                    "td": "",
                    "sup": "}",
                    "sub": "}",
                    "figure": "\n",
                    "figcaption": "\n"}
        self.list_type = "ol"       # current list kind: "ol" or "ul"
        self.item_no = 0            # 1-based counter for ordered-list items
        self.inside_p = False
        self.inside_pre = False
        self.pre_data = ""          # raw text buffered while inside pre/code
        self.inside_blockquote = False
        self.inside_tag = "" #Indicates the non-nestable tag we are currently inside
        self.start_tag = "" #Opening markup just emitted, in case we have to delete it
        self.del_tag = ""   #Tag whose opening markup was deleted in favour of <a>
        self.tag_attrib = "" #Tag attribute value, appended at the closing tag
        self.folder = None  # attachment folder; must provide exists()/touch()/path
                            # (presumably a Zim Dir object) -- set via to_zim()
        self.a_href = "" #href of the currently open <a> tag
        self.inside_li = False
        self.list_level = -1        # nesting depth of ol/ul lists (-1 = none)
        self.inside_iframe = False
        self.inside_span = False
        self.inside_dl = False
        self.inside_table = False

    def handle_starttag(self, tag, attrs):
        """Emit the Zim opening markup for *tag* (with attribute list *attrs*)."""
        #If we are in a non-nestable tag we do nothing
        if self.inside_tag and not (self.inside_tag == "a" and tag == "img" and self.a_href) and not(self.inside_tag == "th" or self.inside_tag == "td" or self.inside_tag == "dt" or self.inside_tag == "dd") and not (tag == "a" and (self.inside_tag == "b" or self.inside_tag == "strong" or self.inside_tag == "i" or self.inside_tag == "em" or self.inside_tag == "u" or self.inside_tag == "ins" or self.inside_tag == "mark" or self.inside_tag == "strike" or self.inside_tag == "del") and self.zim_str.endswith(self.beg[self.inside_tag])):
            return
        if tag == "blockquote":
            self.inside_blockquote = True
        #If the tag a is in a non-nestable one, tag a prevails and the previous one is deleted. In block sentences it is not done
        if tag == "a" and self.inside_tag and ((self.inside_tag != "pre" and self.inside_tag != "code")):
            self.del_tag = self.inside_tag
            self.zim_str = self.zim_str[:len(self.zim_str)-len(self.start_tag)]
        #Initialize non-nestable tag
        if tag != "td" and tag != "dd" and self.beg.get(tag) or tag == "a" and not self.inside_tag:
            self.inside_tag = tag
        if (tag == "pre" or tag == "code"): #If pre in p
            self.inside_pre = True
        if tag in list(self.beg.keys()):
            #Add blank when tag not start line
            if self.zim_str.endswith(("\n", "(", "[", "\t", "\"", " ", "/", '\xa0')):
                blank = ""
            else:
                blank = " "
            self.zim_str += blank + self.beg[tag]
            self.start_tag = self.beg[tag] #Remember the opening markup so it can be deleted later if needed
        if tag == "p":
            self.inside_p = True
            if self.inside_blockquote:
                self.zim_str += "\t"
        elif tag == "del":
            datetime = assoc("datetime", attrs)
            if datetime is not None:
                self.tag_attrib = " (" + datetime + ")"
        elif tag == "abbr":
            title = assoc("title", attrs)
            if title is not None:
                self.tag_attrib = " (" + title + ")"
        elif tag == "q":
            cite = assoc("cite", attrs)
            if cite is not None:
                self.tag_attrib = " ([[#|" + cite + "]])"
            self.zim_str += '"'
        elif tag == "time":
            datetime = assoc("datetime", attrs)
            if datetime is not None:
                self.tag_attrib = " (" + datetime + ")"
        elif tag == "a":
            href = assoc("href", attrs)
            self.a_href = href #href of the open <a> tag
            if href is None:
                href = "#"
            #Add blank when tag not start line
            if self.zim_str.endswith(("\n", "(", "[", "\t", "\"", " ", "/", '\xa0')):
                blank = ""
            else:
                blank = " "
            #If we are in a table we escape |
            if self.inside_table:
                pipe = "\|"
            else:
                pipe = "|"
            self.zim_str += blank + "[[{}".format(href) + pipe
        elif tag == "ol":
            #if we are in a definition list the tab is not put to the dd
            if self.inside_dl and self.zim_str.endswith("\t"):
                self.zim_str = self.zim_str[:len(self.zim_str)-len("\t")]
            #If it is not at the beginning of the line an enter is added
            if self.zim_str and not self.zim_str.endswith("\n"):
                self.zim_str += "\n"
            self.list_type = "ol"
            self.item_no = 0
            self.list_level += 1
        elif tag == "ul":
            #if we are in a definition list the tab is not put to the dd
            if self.inside_dl and self.zim_str.endswith("\t"):
                self.zim_str = self.zim_str[:len(self.zim_str)-len("\t")]
            #If it is not at the beginning of the line an enter is added
            if self.zim_str and not self.zim_str.endswith("\n"):
                self.zim_str += "\n"
            self.list_type = "ul"
            self.item_no = 0
            self.list_level += 1
        elif tag == "li":
            #If you are in a blockquote add tab
            if self.inside_blockquote:
                self.zim_str += "\t"
            #If tag li no close add enter
            if self.inside_li and (self.zim_str and not self.zim_str.endswith("\n")):
                self.zim_str += "\n"
            self.item_no += 1
            self.zim_str += "\t" * self.list_level #Indent by nesting level
            if self.list_type == "ol":
                self.zim_str += str(self.item_no) + ". "
            else:
                self.zim_str += "* "
            self.inside_li = True
        elif tag == "img":
            src = assoc("src", attrs)
            if src is None or src == "":
                src = "#"
            alt = assoc("alt", attrs)
            if alt is None:
                alt = "Image"
            if src != "#" and not self.inside_table:
                #If the image and the link match, only the image remains and the label is deleted
                if self.inside_tag == "a" and src == self.a_href:
                    self.zim_str = self.zim_str[:len(self.zim_str)-len("[[" + self.a_href + "|")]
                #If img is inside an <a> whose href differs from the image src, the link prevails
                if self.inside_tag == "a" and src != self.a_href:
                    return
                img_name = os.path.basename(src)
                if not self.folder.exists():
                    self.folder.touch()
                # Download the image into the attachment folder and embed it.
                get_url(src, self.folder.path + "/" + img_name)
                self.zim_str += "{{./" + img_name + "}}"
            else:
                if self.inside_table:
                    self.zim_str += "{0}".format(src)
                else:
                    self.zim_str += "[[{0}|{1}]]".format(src, alt)
        elif tag == "iframe":
            self.inside_iframe = True
            src = assoc("src", attrs)
            self.zim_str += "#|" + src
        elif tag == "span":
            self.inside_span = True
        elif tag == "dl":
            self.inside_dl = True
        elif tag == "table":
            self.inside_table = True

    def handle_endtag(self, tag):
        """Emit the Zim closing markup for *tag*, undoing empty constructs."""
        if self.inside_tag and tag != self.inside_tag and not (self.inside_tag == "th" or self.inside_tag == "td" or self.inside_tag == "dt" or self.inside_tag == "dd" ) and not (tag == "a" and (self.inside_tag == "b" or self.inside_tag == "strong" or self.inside_tag == "i" or self.inside_tag == "em" or self.inside_tag == "u" or self.inside_tag == "ins" or self.inside_tag == "mark" or self.inside_tag == "strike" or self.inside_tag == "del") and self.del_tag):
            return
        if tag == "blockquote":
            self.inside_blockquote = False
        #end of nestable tag
        if self.inside_tag == tag:
            self.inside_tag = "";
            #Init href of tag a
            if tag == "a":
                self.a_href = ""
        #If you tag this within another non-nestable it is deleted
        if self.del_tag == tag:
            self.start_tag = ""
            self.del_tag = ""
            return
        if (tag == "pre" or tag == "code"):
            #If tag empty del start tag
            if not self.pre_data:
                if self.zim_str.endswith("''"):
                    self.zim_str = self.zim_str[:len(self.zim_str) - 2]
                self.pre_data = ""
                self.inside_pre = False
                return
            # Multi-line pre/code becomes a Zim verbatim block ('''...''');
            # single-line stays inline ('' ... '').
            if self.pre_data.count('\n') > 0:
                #Initial tag
                if self.zim_str.endswith("''"):
                    self.zim_str = self.zim_str[:len(self.zim_str) - 2]
                self.zim_str += "\n'''\n"
            self.zim_str += self.pre_data
            #Final Tag
            if self.pre_data.count('\n') > 0:
                self.zim_str += "\n'''\n"
            else:
                self.zim_str += self.end[tag]
            self.pre_data = ""
            self.inside_pre = False
            return
        #Remove enter before tr, td, th
        if tag == "tr" or tag == "td" or tag == "th":
            if self.zim_str.endswith("\n"):
                self.zim_str = self.zim_str[:len(self.zim_str) - len("\n")]
        if tag == "p":
            self.inside_p = False
        elif tag == "del" and self.tag_attrib:
            self.zim_str += self.tag_attrib
            self.tag_attrib = ""
        elif tag == "abbr" and self.tag_attrib:
            self.zim_str += self.tag_attrib
            self.tag_attrib = ""
        elif tag == "q":
            self.zim_str += '"'
            if self.tag_attrib:
                self.zim_str += self.tag_attrib
            if not self.inside_p:
                self.zim_str += "\n"
            self.tag_attrib = ""
        elif tag == "time" and self.tag_attrib:
            self.zim_str += self.tag_attrib
            self.tag_attrib = ""
        elif tag == "iframe":
            self.inside_iframe = False
        elif tag == "ol" or tag == "ul":
            self.list_level -= 1
        elif tag == "span":
            self.inside_span = False
        elif tag == "dl":
            self.inside_dl = False
        elif tag == "table":
            self.inside_table = False
        if tag in list(self.end.keys()):
            self.start_tag = ""
            if tag == "li":
                #If li empty del
                if self.list_type == "ul" and self.zim_str.endswith("* "):
                    self.zim_str = self.zim_str[:len(self.zim_str) - 2]
                elif self.list_type == "ol" and self.zim_str.endswith(str(self.item_no) + ". "):
                    self.zim_str = self.zim_str[:len(self.zim_str) - len(str(self.item_no) + ". ")]
                    self.item_no -= 1
                #Add enter if not exists in li tag
                elif not self.zim_str.endswith("\n"):
                    self.zim_str += "\n"
            else:
                #If we are not at a level higher than the first level of a list and not two last tag finish in \n
                if not ((tag == "ol" or tag == "ul") and self.list_level >= 0):
                    #If tag empty then delete
                    if tag in list(self.beg.keys()) and self.beg[tag] and self.zim_str.endswith(self.beg[tag]):
                        self.zim_str = self.zim_str[:len(self.zim_str) - len(self.beg[tag])]
                    else:
                        #If not duplicate \n
                        if not ((tag == "ol" or tag == "ul") and self.zim_str.endswith("\n")):
                            self.zim_str += self.end[tag]
        #If tag li end inside_li false
        if tag == "li" or tag == "ul" or tag == "ol":
            self.inside_li = False

    def handle_data(self, data):
        """Append text content, escaping any Zim markup it happens to contain."""
        if self.inside_pre: #buffer verbatim, do not clean
            self.pre_data += data
        else:
            if self.inside_iframe:
                space_removed_data = ""
            else:
                #Wrap text that already looks like Zim syntax in literal quotes
                data = re.sub('(\'\'.+\'\')', r"''\1''", data)
                data = re.sub('(\[\[.*\]\])', r"''\1''", data)
                data = re.sub('(^=+ .+ =+)', r"''\1''", data)
                data = re.sub('(^\t*\* )', r"''\1''", data)
                data = re.sub('(^\t*\[[ *x]\] )', r"''\1''", data)
                data = re.sub('(\*\*.+\*\*)', r"''\1''", data)
                data = re.sub('(\/\/.+\/\/)', r"''\1''", data)
                data = re.sub('(__.+__)', r"''\1''", data)
                data = re.sub('(~~.+~~)', r"''\1''", data)
                data = re.sub('(\^\{.+\})', r"''\1''", data)
                data = re.sub('(\_\{.+\})', r"''\1''", data)
                #If we are in a span tag, rstrip does not apply
                if self.inside_span:
                    space_removed_data = re.sub(r"[\s]+", " ", data)
                else:
                    space_removed_data = re.sub(r"[\s]+", " ", data.rstrip())
                #If we are in a table, escape \n and |
                if self.inside_table:
                    space_removed_data = space_removed_data.replace("|", "\|")
                    space_removed_data = space_removed_data.replace("\n", "\\n")
            self.zim_str += space_removed_data

    def handle_entityref(self, name):
        """Append the character for a named entity reference such as &amp;."""
        if name in name2codepoint:
            c = chr(name2codepoint[name])
            if self.inside_pre:
                #Add blank when tag not start line
                if self.pre_data.endswith(("\n", "(", "[", "\t", "\"", " ", "/", '\xa0')):
                    blank = ""
                else:
                    blank = " "
                self.pre_data += blank + c
            else:
                #Add blank when tag not start line
                if self.zim_str.endswith(("\n", "(", "[", "\t", "\"", " ", "/", '\xa0')):
                    blank = ""
                else:
                    blank = " "
                self.zim_str += blank + c

    def handle_charref(self, name):
        """Append the character for a numeric reference like &#62; or &#x3E;."""
        if name.startswith('x'):
            c = chr(int(name[1:], 16))
        else:
            c = chr(int(name))
        #Add blank when tag not start line
        if self.zim_str.endswith(("\n", "(", "[", "\t", "\"", " ", "/", '\xa0')):
            blank = ""
        else:
            blank = " "
        self.zim_str += blank + c

    def handle_startendtag(self, tag, attrs):
        """Handle self-closing tags: <br/>, <input/>, <img/>, <hr/>."""
        if tag == "br":
            self.zim_str += "\n\n"
        elif tag == "input":
            if assoc("type", attrs) in ["checkbox", "radio"]:
                is_checked = assoc("checked", attrs)
                if not (is_checked is None) and is_checked.lower() == "true":
                    self.zim_str += "[*] "
                else:
                    self.zim_str += "[ ] "
        elif tag == "img":
            src = assoc("src", attrs)
            if src is None or src == "":
                src = "#"
            alt = assoc("alt", attrs)
            if alt is None:
                alt = "Image"
            if src != "#" and not self.inside_table:
                #If the image and the link match, only the image remains and the label is deleted
                if self.inside_tag == "a" and src == self.a_href:
                    self.zim_str = self.zim_str[:len(self.zim_str)-len("[[" + self.a_href + "|")]
                #If img is inside an <a> whose href differs from the image src, the link prevails
                if self.inside_tag == "a" and src != self.a_href:
                    return
                img_name = os.path.basename(src)
                if not self.folder.exists():
                    self.folder.touch()
                # Download the image into the attachment folder and embed it.
                get_url(src, self.folder.path + "/" + img_name)
                self.zim_str += "{{./" + img_name + "}}"
            else:
                if self.inside_table:
                    self.zim_str += "{0}".format(src)
                else:
                    self.zim_str += "[[{0}|{1}]]".format(src, alt)
        elif tag == "hr":
            self.zim_str += "-----\n"

    def to_zim(self, html_str, folder):
        """Parse *html_str* and return the resulting Zim markup.

        *folder* is the attachment folder used for downloaded images (must
        provide ``exists()``, ``touch()`` and ``path``).  Consecutive blank
        lines are collapsed; a trailing blank line is kept after a table so
        Zim recognises it.
        """
        self.folder = folder
        self.feed(html_str)
        #return self.zim_str.strip() + ("\n\n" if self.zim_str.strip().endswith("|") else "")
        return re.sub(r'\n\n+', "\n\n", self.zim_str).strip() + ("\n\n" if self.zim_str.strip().endswith("|") else "")
| cetinkaya/pastefromhtml | htmlcdparser.py | Python | gpl-3.0 | 19,903 | 0.006079 |
from setuptools import setup, find_packages

from helga_github_meta import __version__ as version


# Trove classifiers describing the supported environments and license.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Topic :: Communications :: Chat :: Internet Relay Chat',
    'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Communications :: Chat :: Internet Relay Chat',
]

setup(
    name='helga-github-meta',
    version=version,
    description='Provide information for github related metadata',
    classifiers=CLASSIFIERS,
    keywords='irc bot github-meta urbandictionary urban dictionary ud',
    author='Jon Robison',
    author_email='narfman0@gmail.com',
    url='https://github.com/narfman0/helga-github-meta',
    license='LICENSE',
    packages=find_packages(),
    include_package_data=True,
    py_modules=['helga_github_meta.plugin'],
    zip_safe=True,
    install_requires=['helga', 'requests'],
    test_suite='tests',
    # Register the plugin under helga's entry-point group so helga can
    # discover it at startup.
    entry_points={
        'helga_plugins': [
            'github-meta = helga_github_meta.plugin:github_meta',
        ],
    },
)
| narfman0/helga-github-meta | setup.py | Python | gpl-3.0 | 1,198 | 0 |
from django.contrib import admin

from library.models import Author, Book, Genre, Review

# Register the library models with the default admin site so they are
# editable through the Django admin interface.
for model in (Author, Book, Genre, Review):
    admin.site.register(model)
| Nocks/ReadBooks | library/admin.py | Python | mit | 199 | 0 |
# -*- coding: utf-8 -*-
"""
Largest product in a grid
https://projecteuler.net/problem=11
"""
GRID = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""
def adjacent_numbers_gen(grid, length=4):
    """Yield every run of *length* adjacent cells in *grid*.

    Runs are produced in four directions -- right, down, diagonally
    down-right and diagonally down-left -- the directions needed for
    Project Euler problem 11.  Each run is yielded as a tuple of cell
    values.  ``length`` defaults to 4, preserving the original behaviour
    of yielding ``(a, b, c, d)`` quadruples in the same order as the four
    hand-unrolled scans it replaces.

    The grid is assumed rectangular (all rows the same length), as the
    original code assumed.
    """
    # (row step, column step) per scan direction, in the original scan order:
    # right, down, down-right, down-left.
    steps = ((0, 1), (1, 0), (1, 1), (1, -1))
    span = length - 1
    rows = len(grid)
    for di, dj in steps:
        for i, row in enumerate(grid):
            cols = len(row)
            for j in range(cols):
                end_i = i + span * di
                end_j = j + span * dj
                # Skip starting cells whose run would leave the grid.
                if not (0 <= end_i < rows and 0 <= end_j < cols):
                    continue
                yield tuple(grid[i + k * di][j + k * dj] for k in range(length))
# Parse the 20x20 block of two-digit numbers into a list of integer rows.
grid = []
for line in GRID.strip().split('\n'):
    grid.append([int(x.strip()) for x in line.split()])

# The answer is the greatest product over every run of four adjacent numbers;
# max() over a generator replaces the manual running-maximum loop.
max_product = max(a * b * c * d for a, b, c, d in adjacent_numbers_gen(grid))
# print() works under both Python 2 and Python 3; the original
# `print max_product` statement is a SyntaxError on Python 3.
print(max_product)
| kulapard/projecteuler.net | python/problem_11.py | Python | mit | 2,670 | 0.000749 |
from split_settings.tools import optional, include

# Assemble the Django settings from ordered component files.  The order is
# significant: each later file can override names defined by earlier ones.
include(
    'components/base.py',
    'components/pagination.py',
    optional('components/global.py'),
    ##
    # NOTE(review): this comment originally referred to "product.py", which
    # is not part of this include list -- presumably it meant the files
    # above.  local.py must come last so it can override their defaults; if
    # a required value (e.g. in the DATABASE dictionary) has no default
    # there, local.py must define it.
    'components/local.py',
    scope=globals()
)
| testbed/testbed | testbed/db/djconfig/settings/__init__.py | Python | gpl-3.0 | 359 | 0.002786 |
"""
This integration tests will perform basic operations on a storage element, depending on which protocols are available.
It creates a local hierarchy, and then tries to upload, download, remove, get metadata etc
Potential problems:
* it might seem a good idea to simply add tests for the old srm in it. It is not :-)
There is a deadlock between gfal and gfal2 libraries, you can't load both of them together
* if running in debug mode, you will hit a deadlock with gsiftp :-) https://its.cern.ch/jira/browse/DMC-922
* On some storage (like EOS), there is a caching of metadata. So a file just created, even if present,
might return no metadata information. Sleep times might be needed when this happens.
Examples:
<python Test_Resources_GFAL2StorageBase.py CERN-GFAL2>: will test all the gfal2 plugins defined for CERN-GFAL2
<python Test_Resources_GFAL2StorageBase.py CERN-GFAL2 GFAL2_XROOT>: will test the GFAL2_XROOT plugins defined for CERN-GFAL2
"""
# pylint: disable=invalid-name,wrong-import-position
from __future__ import print_function
import unittest
import sys
import os
import tempfile
import shutil
from DIRAC.Core.Base import Script
Script.setUsageMessage("""
Test a full DMS workflow against a StorageElement
\t%s <SE name> <PluginLists>
\t<SE name>: mandatory
\t<plugins>: comma separated list of plugin to test (defautl all)
""" % Script.scriptName)

Script.parseCommandLine()

# Positional arguments: [SEName, <plugins>]
posArgs = Script.getPositionalArgs()

if not posArgs:
  Script.showHelp()
  sys.exit(1)

# DIRAC modules are imported only after Script.parseCommandLine() -- the
# standard DIRAC scripting pattern, so the framework is initialised with the
# parsed command line / configuration before any other DIRAC import runs.
from DIRAC import gLogger
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.File import getSize
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup

#### GLOBAL VARIABLES: ################

# Name of the storage element that has to be tested
gLogger.setLevel('DEBUG')
STORAGE_NAME = posArgs[0]

# Size in bytes of the file we want to produce
FILE_SIZE = 5 * 1024  # 5kB

# base path on the storage where the test files/folders will be created
DESTINATION_PATH = ''

# plugins that will be used; either given on the command line or all the
# plugins defined for the storage element
AVAILABLE_PLUGINS = []

if len(posArgs) > 1:
  AVAILABLE_PLUGINS = posArgs[1].split(',')
else:
  res = StorageElement(STORAGE_NAME).getPlugins()
  if not res['OK']:
    gLogger.error("Failed fetching available plugins", res['Message'])
    sys.exit(2)
  AVAILABLE_PLUGINS = res['Value']

# Derive the per-user destination path (/<vo>/user/<initial>/<user>/gfaltests)
# from the client proxy.
try:
  res = getProxyInfo()
  if not res['OK']:
    gLogger.error("Failed to get client proxy information.", res['Message'])
    sys.exit(2)
  proxyInfo = res['Value']
  username = proxyInfo['username']
  vo = ''
  if 'group' in proxyInfo:
    vo = getVOForGroup(proxyInfo['group'])
  DESTINATION_PATH = '/%s/user/%s/%s/gfaltests' % (vo, username[0], username)

except Exception as e:  # pylint: disable=broad-except
  print(repr(e))
  sys.exit(2)

# local path containing test files. There should be a folder called Workflow containing (the files can be simple textfiles)
# FolderA
# -FolderAA
# --FileAA
# -FileA
# FolderB
# -FileB
# File1
# File2
# File3
def _mul(txt):
  """ Repeat *txt* enough times to reach (approximately) FILE_SIZE bytes.

      Floor division (``//``) keeps the repeat count an int on both Python 2
      and Python 3: with true division (``/``) the original expression raises
      ``TypeError: can't multiply sequence by non-int`` on Python 3.
  """
  return txt * (max(1, FILE_SIZE // len(txt)))
class basicTest(unittest.TestCase):
  """ Performs the full DMS workflow test; subclasses call setUp with the
      specific plugin name to exercise.
  """

  def setUp(self, pluginToTest):
    """ Put in place the local directory structure and choose the read/write
        StorageElements for *pluginToTest*.

        Note: nonstandard setUp signature -- it is invoked explicitly by the
        per-plugin subclasses, not directly by unittest.
    """
    #gLogger.setLevel( 'DEBUG' )
    self.LOCAL_PATH = tempfile.mkdtemp()
    self.storageName = STORAGE_NAME

    # create the local structure
    workPath = os.path.join(self.LOCAL_PATH, 'Workflow')
    os.mkdir(workPath)

    os.mkdir(os.path.join(workPath, 'FolderA'))
    with open(os.path.join(workPath, 'FolderA', 'FileA'), 'w') as f:
      f.write(_mul('FileA'))

    os.mkdir(os.path.join(workPath, 'FolderA', 'FolderAA'))
    with open(os.path.join(workPath, 'FolderA', 'FolderAA', 'FileAA'), 'w') as f:
      f.write(_mul('FileAA'))
    os.mkdir(os.path.join(workPath, 'FolderB'))
    with open(os.path.join(workPath, 'FolderB', 'FileB'), 'w') as f:
      f.write(_mul('FileB'))

    for fn in ["File1", "File2", "File3"]:
      with open(os.path.join(workPath, fn), 'w') as f:
        f.write(_mul(fn))

    # When testing for a given plugin, this plugin might not be able to
    # write or read. In this case, we use this specific plugins
    # ONLY for the operations it is allowed to
    specSE = StorageElement(self.storageName, plugins=pluginToTest)
    genericSE = StorageElement(self.storageName)
    pluginProtocol = specSE.protocolOptions[0]['Protocol']
    if pluginProtocol in specSE.localAccessProtocolList:
      print("Using specific SE with %s only for reading" % pluginToTest)
      self.readSE = specSE
    else:
      print("Plugin %s is not available for read. Use a generic SE" % pluginToTest)
      self.readSE = genericSE

    if pluginProtocol in specSE.localWriteProtocolList:
      print("Using specific SE with %s only for writing" % pluginToTest)
      self.writeSE = specSE
    else:
      print("Plugin %s is not available for write. Use a generic SE" % pluginToTest)
      self.writeSE = genericSE

    # Make sure we are testing the specific plugin at least for one direction
    self.assertTrue(self.readSE == specSE or self.writeSE == specSE,
                    "Using only generic SE does not make sense!!")
    basicTest.clearDirectory(self)

  def tearDown(self):
    """ Remove the local tree and the remote files """
    shutil.rmtree(self.LOCAL_PATH)
    self.clearDirectory()

  def clearDirectory(self):
    """ Removing target directory on the remote storage (best effort) """
    print("==================================================")
    print("==== Removing the older Directory ================")
    workflow_folder = DESTINATION_PATH + '/Workflow'
    res = self.writeSE.removeDirectory(workflow_folder)
    if not res['OK']:
      print("basicTest.clearDirectory: Workflow folder maybe not empty")
    print("==================================================")

  def testWorkflow(self):
    """ This performs a complete workflow putting, removing, stating files
        and directories against the live storage element.
    """

    putDir = {os.path.join(DESTINATION_PATH, 'Workflow/FolderA'):
              os.path.join(self.LOCAL_PATH, 'Workflow/FolderA'),
              os.path.join(DESTINATION_PATH, 'Workflow/FolderB'):
              os.path.join(self.LOCAL_PATH, 'Workflow/FolderB')}

    createDir = [os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderAA'),
                 os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderABA'),
                 os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderAAB')
                 ]

    putFile = {os.path.join(DESTINATION_PATH, 'Workflow/FolderA/File1'):
               os.path.join(self.LOCAL_PATH, 'Workflow/File1'),
               os.path.join(DESTINATION_PATH, 'Workflow/FolderAA/File1'):
               os.path.join(self.LOCAL_PATH, 'Workflow/File1'),
               os.path.join(DESTINATION_PATH, 'Workflow/FolderBB/File2'):
               os.path.join(self.LOCAL_PATH, 'Workflow/File2'),
               os.path.join(DESTINATION_PATH, 'Workflow/FolderB/File2'):
               os.path.join(self.LOCAL_PATH, 'Workflow/File2'),
               os.path.join(DESTINATION_PATH, 'Workflow/File3'):
               os.path.join(self.LOCAL_PATH, 'Workflow/File3')}

    isFile = {os.path.join(DESTINATION_PATH, 'Workflow/FolderA/File1'):
              os.path.join(self.LOCAL_PATH, 'Workflow/File1'),
              os.path.join(DESTINATION_PATH, 'Workflow/FolderB/FileB'):
              os.path.join(self.LOCAL_PATH, 'Workflow/FolderB/FileB'),
              }

    listDir = [os.path.join(DESTINATION_PATH, 'Workflow'),
               os.path.join(DESTINATION_PATH, 'Workflow/FolderA'),
               os.path.join(DESTINATION_PATH, 'Workflow/FolderB')
               ]

    getDir = [os.path.join(DESTINATION_PATH, 'Workflow/FolderA'),
              os.path.join(DESTINATION_PATH, 'Workflow/FolderB')
              ]

    removeFile = [os.path.join(DESTINATION_PATH, 'Workflow/FolderA/File1')]
    rmdir = [os.path.join(DESTINATION_PATH, 'Workflow')]

    ##### Computing local adler and size #####

    fileAdlers = {}
    fileSizes = {}

    # .iteritems()/.itervalues() below are Python 2 only.
    for lfn, localFn in isFile.iteritems():
      fileAdlers[lfn] = fileAdler(localFn)
      fileSizes[lfn] = getSize(localFn)

    ########## uploading directory #############
    res = self.writeSE.putDirectory(putDir)
    self.assertEqual(res['OK'], True)
    # time.sleep(5)
    res = self.readSE.listDirectory(listDir)
    self.assertEqual(any(os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FileA') in dictKey for dictKey in
                         res['Value']['Successful'][os.path.join(DESTINATION_PATH, 'Workflow/FolderA')]['Files'].keys()), True)
    self.assertEqual(any(os.path.join(DESTINATION_PATH, 'Workflow/FolderB/FileB') in dictKey for dictKey in
                         res['Value']['Successful'][os.path.join(DESTINATION_PATH, 'Workflow/FolderB')]['Files'].keys()), True)

    ########## createDir #############
    res = self.writeSE.createDirectory(createDir)
    self.assertEqual(res['OK'], True)
    res = res['Value']
    self.assertEqual(res['Successful'][createDir[0]], True)
    self.assertEqual(res['Successful'][createDir[1]], True)
    self.assertEqual(res['Successful'][createDir[2]], True)

    ######## putFile ########
    res = self.writeSE.putFile(putFile)
    self.assertEqual(res['OK'], True)
    # time.sleep(5)
    res = self.readSE.isFile(isFile)
    self.assertEqual(res['OK'], True)
    self.assertTrue(all([x for x in res['Value']['Successful'].itervalues()]))
    # self.assertEqual( res['Value']['Successful'][isFile[0]], True )
    # self.assertEqual( res['Value']['Successful'][isFile[1]], True )

    ######## getMetadata ###########
    res = self.readSE.getFileMetadata(isFile)
    self.assertEqual(res['OK'], True)
    res = res['Value']['Successful']
    self.assertEqual(any(path in resKey for path in isFile for resKey in res.keys()), True)

    # Checking that the checksums and sizes are correct
    for lfn in isFile:
      self.assertEqual(res[lfn]['Checksum'], fileAdlers[lfn])
      self.assertEqual(res[lfn]['Size'], fileSizes[lfn])

    ####### getDirectory ######
    res = self.readSE.getDirectory(getDir, os.path.join(self.LOCAL_PATH, 'getDir'))
    self.assertEqual(res['OK'], True)
    res = res['Value']
    self.assertEqual(any(getDir[0] in dictKey for dictKey in res['Successful']), True)
    self.assertEqual(any(getDir[1] in dictKey for dictKey in res['Successful']), True)

    ###### removeFile ##########
    res = self.writeSE.removeFile(removeFile)
    self.assertEqual(res['OK'], True)
    res = self.readSE.exists(removeFile)
    self.assertEqual(res['OK'], True)
    self.assertEqual(res['Value']['Successful'][removeFile[0]], False)

    ###### remove non existing file #####
    res = self.writeSE.removeFile(removeFile)
    self.assertEqual(res['OK'], True)
    res = self.readSE.exists(removeFile)
    self.assertEqual(res['OK'], True)
    self.assertEqual(res['Value']['Successful'][removeFile[0]], False)

    ########### removing directory ###########
    res = self.writeSE.removeDirectory(rmdir, True)

    res = self.readSE.exists(rmdir)
    self.assertEqual(res['OK'], True)
    self.assertEqual(res['Value']['Successful'][rmdir[0]], False)
@unittest.skipIf('GFAL2_SRM2' not in AVAILABLE_PLUGINS,
                 "StorageElement %s does not have plugin GFAL2_SRM2 defined" % STORAGE_NAME)
class GFAL2_SRM2_Test(basicTest):
  """ Test using the GFAL2_SRM2 plugin """

  def setUp(self):
    # Bind this TestCase to the GFAL2_SRM2 plugin; basicTest.setUp builds
    # the local fixture tree and the read/write StorageElement pair.
    basicTest.setUp(self, 'GFAL2_SRM2')
@unittest.skipIf('GFAL2_HTTPS' not in AVAILABLE_PLUGINS,
                 "StorageElement %s does not have plugin GFAL2_HTTPS defined" % STORAGE_NAME)
class GFAL2_HTTPS_Test(basicTest):
  """ Test using the GFAL2_HTTPS plugin """

  def setUp(self):
    # Fixed: the original passed 'GFAL2_HTTP' here although the skip guard
    # above (and the class itself) checks for the 'GFAL2_HTTPS' plugin, so
    # the specific SE was built with a plugin name the guard never verified.
    basicTest.setUp(self, 'GFAL2_HTTPS')
@unittest.skipIf('GFAL2_XROOT' not in AVAILABLE_PLUGINS,
                 "StorageElement %s does not have plugin GFAL2_XROOT defined" % STORAGE_NAME)
class GFAL2_XROOT_Test(basicTest):
  """ Test using the GFAL2_XROOT plugin """

  def setUp(self):
    # Bind this TestCase to the GFAL2_XROOT plugin.
    basicTest.setUp(self, 'GFAL2_XROOT')
@unittest.skipIf('XROOT' not in AVAILABLE_PLUGINS,
                 "StorageElement %s does not have plugin XROOT defined" % STORAGE_NAME)
class XROOT_Test(basicTest):
  """ Test using the XROOT plugin """

  def setUp(self):
    # Bind this TestCase to the XROOT plugin.
    basicTest.setUp(self, 'XROOT')
@unittest.skipIf('GFAL2_GSIFTP' not in AVAILABLE_PLUGINS,
                 "StorageElement %s does not have plugin GFAL2_GSIFTP defined" % STORAGE_NAME)
class GFAL2_GSIFTP_Test(basicTest):
  """ Test using the GFAL2_GSIFTP plugin """

  def setUp(self):
    # Bind this TestCase to the GFAL2_GSIFTP plugin.
    basicTest.setUp(self, 'GFAL2_GSIFTP')
@unittest.skipIf('SRM2' not in AVAILABLE_PLUGINS,
                 "StorageElement %s does not have plugin SRM2 defined" % STORAGE_NAME)
class SRM2_Test(basicTest):
  """ Test using the old (gfal1) SRM2 plugin.

      Not part of the default suite in __main__: per the module docstring,
      loading gfal and gfal2 together deadlocks.
  """

  def setUp(self):
    basicTest.setUp(self, 'SRM2')
if __name__ == '__main__':
  # Build the suite from all gfal2-based test classes.  SRM2_Test (gfal1)
  # is deliberately excluded: per the module docstring, loading the gfal
  # and gfal2 libraries in the same process deadlocks.
  suite = unittest.defaultTestLoader.loadTestsFromTestCase(GFAL2_SRM2_Test)
  suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(GFAL2_XROOT_Test))
  suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(GFAL2_HTTPS_Test))
  suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(GFAL2_GSIFTP_Test))
  suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(XROOT_Test))
  testResult = unittest.TextTestRunner(verbosity=2).run(suite)
  # Exit code 0 on success, 1 on any failure/error.
  sys.exit(not testResult.wasSuccessful())
| fstagni/DIRAC | tests/Integration/Resources/Storage/Test_Resources_GFAL2StorageBase.py | Python | gpl-3.0 | 14,328 | 0.008305 |
# -*- coding:utf-8 -*-
import tornado.web
from wechatpy.parser import parse_message
from wechatpy import WeChatClient
TOKEN = '123456'
APPID = 'wxecb5391ec8a58227'
SECRET = 'fa32576b9daa6fd020c0104e6092196a'
import sys
reload(sys)
# Python 2-only hack: reload(sys) re-exposes setdefaultencoding so the
# UTF-8 menu labels below can be handled. This fails on Python 3, where
# reload() is not a builtin and setdefaultencoding no longer exists.
sys.setdefaultencoding("utf-8")
class BaseHandler(object):
    """Builds a WeChat API client and installs this account's custom menu."""
    def get_client(self):
        """Create a WeChatClient from the module-level APPID/SECRET and
        (re)create the official-account menu.

        Returns the raw API response from the menu-creation call.
        """
        # NOTE(review): APPID/SECRET are hardcoded at module level -- move
        # them into configuration/environment before using outside of tests.
        client = WeChatClient(APPID, SECRET)
        # Menu layout: two top-level click buttons plus one submenu mixing
        # click and view (URL) entries. Labels are user-facing Chinese text.
        a = client.menu.create({
            "button": [
                {
                    "type": "click",
                    "name": "阅读",
                    "key": "TODAY_READ"
                },
                {
                    "type": "click",
                    "name": "音乐",
                    "key": "TODAY_MUSIC"
                },
                {
                    "name": "时光",
                    "sub_button": [
                        {
                            "type": "click",
                            "name": "状态",
                            "key": "TODAY_STATUS"
                        },
                        {
                            "type": "view",
                            "name": "故事",
                            "url": "http://wufazhuce.com/"
                        },
                        {
                            "type": "view",
                            "name": "再见",
                            "url": "http://byetimes.com/"
                        },
                        {
                            "type": "view",
                            "name": "关于我们",
                            "url": "http://www.suyafei.com/"
                        }
                    ]
                }
            ],
        })
        return a
if __name__ == '__main__':
    # Manual smoke test: create the client, push the menu, and print the
    # API response so the operator can inspect the result.
    client = BaseHandler().get_client()
    print (client)
| myyyy/wechatserver | wechatclient/test/test.py | Python | mit | 1,796 | 0.001134 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Prodigal(MakefilePackage):
    """Fast, reliable protein-coding gene prediction for prokaryotic
    genomes."""

    homepage = "https://github.com/hyattpd/Prodigal"
    url = "https://github.com/hyattpd/Prodigal/archive/v2.6.3.tar.gz"

    version('2.6.3', '5181809fdb740e9a675cfdbb6c038466')

    def install(self, spec, prefix):
        # Prodigal's Makefile copies the binaries directly into INSTALLDIR
        # (there is no bin/ subdirectory in the install layout).
        make('INSTALLDIR={0}'.format(self.prefix), 'install')

    def setup_environment(self, spack_env, run_env):
        # Bug fix: the bare name ``prefix`` is not defined in this method's
        # scope (it is only an argument of install()), which raised a
        # NameError when the environment was set up. The binaries live
        # directly in the install prefix, so put self.prefix on PATH.
        run_env.prepend_path('PATH', self.prefix)
| EmreAtes/spack | var/spack/repos/builtin/packages/prodigal/package.py | Python | lgpl-2.1 | 1,761 | 0.000568 |
import os
import pytest
import responses
from datetime import date
from datetime import datetime
from datetime import timedelta
from urllib.parse import parse_qs
from urllib.parse import urlparse
from django.conf import settings
from django.utils import timezone
from backend.citeproc import CiteprocError
from backend.citeproc import CiteprocAuthorError
from backend.citeproc import CiteprocContainerTitleError
from backend.citeproc import CiteprocDateError
from backend.citeproc import CiteprocDOIError
from backend.citeproc import CiteprocPubtypeError
from backend.citeproc import CiteprocTitleError
from backend.citeproc import Citeproc
from backend.citeproc import CrossRef
from backend.citeproc import DOIResolver
from papers.baremodels import BareName
from papers.doi import doi_to_crossref_identifier
from papers.doi import doi_to_url
from papers.models import OaiRecord
from papers.models import OaiSource
from papers.models import Paper
from publishers.models import Journal
from publishers.models import Publisher
# (citeproc author dict, expected (first, last) name pair) cases for
# TestCiteproc.test_convert_to_name_pair.
convert_to_name_pair_list = [
    ({'family': 'Farge', 'given': 'Marie'}, ('Marie', 'Farge')),
    ({'literal': 'Marie Farge'}, ('Marie', 'Farge')),
    ({'literal': 'Farge, Marie'}, ('Marie', 'Farge')),
    ({'family': 'Arvind'}, ('', 'Arvind')),
]
# (license URL, is-open-access) cases for TestCiteproc.test_is_oa_license.
is_oai_license_params = [
    # CC
    ('http://creativecommons.org/licenses/by-nc-nd/2.5/co/', True),
    ('http://creativecommons.org/licenses/by-nc/3.10/', True),
    ('https://creativecommons.org/licenses/by-nc-sa/4.0/', True),
    # Other open license
    ('http://www.elsevier.com/open-access/userlicense/1.0/', True),
    # Closed license
    ('http://link.aps.org/licenses/aps-default-license', False),
    ('http://www.acs.org/content/acs/en/copyright.html', False),
    ('http://www.elsevier.com/tdm/userlicense/1.0/', False),
]
class TestCiteproc():
    """
    This class groups tests about the Citeproc class.

    Subclasses override ``test_class`` (and possibly fixtures) to run the
    same suite against CrossRef / DOIResolver variants.
    """
    test_class = Citeproc

    @pytest.mark.parametrize('url, expected', is_oai_license_params)
    def test_is_oa_license(self, url, expected):
        assert self.test_class.is_oa_license(url) == expected

    @pytest.mark.usefixtures('db')
    def test_to_paper(self, container_title, title, citeproc):
        p = self.test_class.to_paper(citeproc)
        # Ensure that paper is in database (i.e. created)
        assert p.pk >= 1
        # Check paper fields
        for author_p, author_c in zip(p.authors_list, citeproc['author']):
            assert author_p['name']['first'] == author_c['given']
            assert author_p['name']['last'] == author_c['family']
            assert author_p['affiliation'] == author_c['affiliation'][0]['name']
            assert author_p['orcid'] == author_c['ORCID']
        assert p.pubdate == date(*citeproc['issued']['date-parts'][0])
        assert p.title == title
        # Ensure that oairecord is in database (i.e. created)
        r = OaiRecord.objects.get(about=p)
        # Check oairecord fields
        assert r.doi == citeproc['DOI']
        assert r.identifier == doi_to_crossref_identifier(citeproc['DOI'])
        assert r.issue == citeproc['issue']
        assert r.journal_title == container_title
        assert r.pages == citeproc['page']
        assert r.pubdate == date(*citeproc['issued']['date-parts'][0])
        assert r.publisher_name == citeproc['publisher']
        assert r.source == OaiSource.objects.get(identifier='crossref')
        assert r.splash_url == doi_to_url(citeproc['DOI'])
        assert r.volume == citeproc['volume']

    @pytest.mark.parametrize('mock_function', ['_get_oairecord_data', '_get_paper_data'])
    def test_to_paper_invalid_data(self, monkeypatch, mock_function, citeproc):
        """
        If data is invalid, i.e. metadata is corrupted or something is missing, must raise exception
        """
        def raise_citeproc_error(*args, **kwargs):
            raise CiteprocError
        monkeypatch.setattr(self.test_class, mock_function, raise_citeproc_error)
        with pytest.raises(CiteprocError):
            self.test_class.to_paper(citeproc)

    def test_to_paper_no_data(self):
        """
        If no data, must raise CiteprocError
        """
        with pytest.raises(CiteprocError):
            self.test_class.to_paper(None)

    @pytest.mark.parametrize('name, expected', convert_to_name_pair_list)
    def test_convert_to_name_pair(self, name, expected):
        """
        Test if name pairing works
        """
        assert self.test_class._convert_to_name_pair(name) == expected

    @pytest.mark.parametrize('author_elem, expected', [(dict(), None), ({'affiliation' : [{'name' : 'Porto'}]}, 'Porto'), ({'affiliation' : [{'name' : 'Porto'}, {'name' : 'Lissabon'}]}, 'Porto')])
    def test_get_affiliation(self, author_elem, expected):
        """
        Must return the first affiliation if any
        """
        assert self.test_class._get_affiliation(author_elem) == expected

    def test_get_abstract(self, citeproc):
        """
        Abstract must be set
        """
        assert self.test_class._get_abstract(citeproc) == citeproc['abstract']

    def test_get_abstract_missing(self, citeproc):
        """
        If no abstract, assert blank
        """
        # Method renamed from the misspelled ``test_get_abstact_missing``.
        del citeproc['abstract']
        assert self.test_class._get_abstract(citeproc) == ''

    def test_get_abstract_escaping(self, citeproc):
        """
        Must do some escaping, e.g. we sometimes get some jats tags
        """
        # We wrap the current abstract into some jats
        expected = citeproc['abstract']
        citeproc['abstract'] = r'<jats:p>{}<\/jats:p>'.format(expected)
        assert self.test_class._get_abstract(citeproc) == expected

    def test_get_affiliations(self, affiliations, citeproc):
        """
        Must have the same length as citeproc['author'] and identical to list of affiliations
        """
        r = self.test_class._get_affiliations(citeproc)
        assert len(r) == len(citeproc.get('author'))
        assert r == affiliations

    def test_get_affiliations_no_authors(self, citeproc):
        """
        Must raise an exception
        """
        del citeproc['author']
        with pytest.raises(CiteprocAuthorError):
            self.test_class._get_affiliations(citeproc)

    def test_get_authors(self, citeproc):
        """
        The list of authors shall be a list of BareNames
        """
        r = self.test_class._get_authors(citeproc)
        assert isinstance(r, list)
        for barename in r:
            assert isinstance(barename, BareName)

    def test_get_authors_empty_list(self, citeproc):
        """
        The list of authors must not be empty
        """
        citeproc['author'] = []
        with pytest.raises(CiteprocAuthorError):
            self.test_class._get_authors(citeproc)

    def test_get_authors_no_list(self, citeproc):
        """
        author in citeproc must be a list
        """
        del citeproc['author']
        with pytest.raises(CiteprocAuthorError):
            self.test_class._get_authors(citeproc)

    def test_get_authors_invalid_author(self, monkeypatch, citeproc):
        """
        If 'None' is an entry, raise exception
        """
        # We mock the function and let it return None, so that name_pairs is a list of None
        monkeypatch.setattr(self.test_class, '_convert_to_name_pair', lambda x: None)
        with pytest.raises(CiteprocAuthorError):
            self.test_class._get_authors(citeproc)

    def test_get_container(self, container_title, citeproc):
        """
        Must return container title
        """
        assert self.test_class._get_container(citeproc) == container_title

    def test_get_container_missing(self):
        """
        Must raise exception
        """
        with pytest.raises(CiteprocContainerTitleError):
            self.test_class._get_container(dict())

    def test_get_doi(self, citeproc):
        """
        Must return the DOI
        """
        assert self.test_class._get_doi(citeproc) == citeproc['DOI']

    def test_get_doi_invalid(self):
        """
        Must raise exception
        """
        with pytest.raises(CiteprocDOIError):
            self.test_class._get_doi({'DOI' : 'spanish inquisition'})

    def test_get_doi_missing(self):
        """
        Must raise exception
        """
        with pytest.raises(CiteprocDOIError):
            self.test_class._get_doi(dict())

    @pytest.mark.parametrize('issn, expected', [('1234-5675', '1234-5675'), (['1234-5675', ], '1234-5675'), ([], '')])
    def test_get_issn(self, citeproc, issn, expected):
        """
        Must return the issn or ''
        """
        citeproc['ISSN'] = issn
        assert self.test_class._get_issn(citeproc) == expected

    def test_get_issn_missing(self, citeproc):
        """
        Must return ''
        """
        del citeproc['ISSN']
        assert self.test_class._get_issn(citeproc) == ''

    @pytest.mark.usefixtures('db', 'mock_alias_publisher_increment', 'mock_journal_find', 'mock_publisher_find')
    @pytest.mark.parametrize('journal', [Journal(publisher=Publisher()), None])
    def test_get_oairecord_data(self, monkeypatch, container_title, issn, citeproc, journal):
        """
        We do some assertions on the results, but relatively lax, as we test the called functions, too
        """
        monkeypatch.setattr(Journal, 'find', lambda issn, title: journal)
        r = self.test_class._get_oairecord_data(citeproc)
        assert r['doi'] == citeproc['DOI']
        assert r['description'] == citeproc['abstract']
        assert r['identifier'] == doi_to_crossref_identifier(citeproc['DOI'])
        assert r['issn'] == issn
        assert r['issue'] == citeproc['issue']
        assert r['journal'] == journal
        assert r['journal_title'] == container_title
        assert r['pages'] == citeproc['page']
        assert r['pdf_url'] == '' # Is not OA
        assert r['pubdate'] == date(*citeproc['issued']['date-parts'][0])
        assert r['publisher_name'] == citeproc['publisher']
        assert r['pubtype'] == citeproc['type']
        assert r['source'] == OaiSource.objects.get(identifier='crossref')
        assert r['splash_url'] == doi_to_url(citeproc['DOI'])
        assert r['volume'] == citeproc['volume']

    @pytest.mark.usefixtures('db', 'mock_journal_find', 'mock_publisher_find')
    def test_get_oairecord_data_missing(self, monkeypatch, container_title, issn, citeproc):
        """
        Some fields may be empty, namely those with a direct get call
        """
        keys = ['abstract', 'issue', 'publisher', 'page', 'volume']
        for k in keys:
            del citeproc[k]
        r = self.test_class._get_oairecord_data(citeproc)
        keys = ['description', 'issue', 'publisher_name', 'pages', 'volume']
        for k in keys:
            assert r[k] == ''

    @pytest.mark.parametrize('orcid, expected', [({'ORCID' : '0000-0001-8187-9704'}, '0000-0001-8187-9704'), ({'ORCID' : '0000-0001-8187-9705'}, None), ({}, None)])
    def test_get_orcid(self, orcid, expected):
        """
        Must be valid or None
        """
        assert self.test_class._get_orcid(orcid) == expected

    def test_get_orcids(self, orcids, citeproc):
        """
        Must have the same length as citeproc['author'] and identical to list of orcid
        """
        r = self.test_class._get_orcids(citeproc)
        assert len(r) == len(citeproc.get('author'))
        assert r == orcids

    def test_get_orcid_no_authors(self, citeproc):
        """
        Must raise an exception
        """
        del citeproc['author']
        with pytest.raises(CiteprocAuthorError):
            self.test_class._get_orcids(citeproc)

    def test_get_paper_data(self, affiliations, orcids, title, citeproc):
        """
        We do some assertions on the results, but relatively lax, as we test the called functions, too
        """
        r = self.test_class._get_paper_data(citeproc)
        assert r['affiliations'] == affiliations
        for a in r['author_names']:
            assert isinstance(a, BareName)
        assert r['orcids'] == orcids
        assert r['pubdate'] == date(*citeproc['issued']['date-parts'][0])
        assert r['title'] == title

    @pytest.mark.parametrize('doi', [True, False])
    @pytest.mark.parametrize('license', [True, False])
    def test_get_pdf_url(self, monkeypatch, doi, license):
        """
        Must return true or false
        """
        monkeypatch.setattr(self.test_class, '_is_oa_by_doi', lambda x: doi)
        monkeypatch.setattr(self.test_class, '_is_oa_by_license', lambda x: license)
        url = 'https://repository.dissem.in/entry/3242/document.pdf'
        r = self.test_class._get_pdf_url(doi, license, url)
        if doi or license:
            assert r == url
        else:
            assert r == ''

    def test_get_pubdate_issued(self, citeproc):
        """
        If contains issued, take this
        """
        citeproc['created'] = {'date-parts' : [[2019, 10, 11]]}
        citeproc['deposited'] = {'date-parts' : [[2019, 10, 12]]}
        assert self.test_class._get_pubdate(citeproc) == date(*citeproc['issued']['date-parts'][0])

    def test_get_pubdate_created(self, citeproc):
        """
        If contains no issued, take created
        """
        del citeproc['issued']
        citeproc['created'] = {'date-parts' : [[2019, 10, 11]]}
        citeproc['deposited'] = {'date-parts' : [[2019, 10, 12]]}
        assert self.test_class._get_pubdate(citeproc) == date(*citeproc['created']['date-parts'][0])

    def test_get_pubdate_deposited(self, citeproc):
        """
        If contains no issued and created, take deposited
        """
        del citeproc['issued']
        citeproc['deposited'] = {'date-parts' : [[2019, 10, 12]]}
        assert self.test_class._get_pubdate(citeproc) == date(*citeproc['deposited']['date-parts'][0])

    def test_get_pubdate_no_date(self, citeproc):
        """
        If contains no date, raise exception
        """
        del citeproc['issued']
        with pytest.raises(CiteprocDateError):
            self.test_class._get_pubdate(citeproc)

    def test_get_pubdate_received_none(self, monkeypatch):
        """
        If no valid date is found, raise exception
        """
        monkeypatch.setattr(self.test_class, '_parse_date', lambda x: None)
        with pytest.raises(CiteprocDateError):
            self.test_class._get_pubdate(dict())

    @pytest.mark.usefixtures('mock_alias_publisher_increment')
    def test_get_publisher_by_journal(self):
        """
        Must return Publisher object
        """
        publisher = Publisher()
        journal = Journal(
            publisher=publisher
        )
        assert self.test_class._get_publisher('p_name', journal) == publisher

    def test_get_publisher_by_name(self, monkeypatch):
        """
        Must return publisher object
        """
        publisher = Publisher()
        monkeypatch.setattr(Publisher, 'find', lambda x: publisher)
        assert self.test_class._get_publisher('p_name', None) == publisher

    def test_get_pubtype(self):
        """
        Must return something from PAPER_TYPES
        """
        pubtype = 'book'
        assert self.test_class._get_pubtype({'type' : pubtype}) == pubtype

    def test_get_pubtype_strange(self):
        """
        Must return other
        """
        assert self.test_class._get_pubtype({'type' : 'spanish inquisition'}) == 'other'

    def test_get_pubtype_missing(self):
        """
        Must raise exception
        """
        with pytest.raises(CiteprocPubtypeError):
            self.test_class._get_pubtype(dict())

    def test_get_title(self, citeproc):
        r = self.test_class._get_title(citeproc)
        assert r == citeproc['title'][:1024]
        assert len(r) <= 1024

    def test_get_title_length(self, citeproc):
        """
        Title must no be longer than 1024 chars
        """
        citeproc['title'] = 'x' * 2000
        r = self.test_class._get_title(citeproc)
        assert r == citeproc['title'][:1024]
        assert len(r) <= 1024

    def test_get_title_length_with_unicode(self, citeproc):
        # 1024 bytes limit, not 1024 characters: a 3-byte character fits 341 times
        citeproc['title'] = '–' * 1024
        r = self.test_class._get_title(citeproc)
        assert r == citeproc['title'][:341]
        assert len(r) <= 1024

    def test_get_title_no_title(self, citeproc):
        """
        Title is mandatory
        """
        del citeproc['title']
        with pytest.raises(CiteprocTitleError):
            self.test_class._get_title(citeproc)

    def test_get_title_emtpy_string(self, citeproc):
        """
        If no title is found, expect CiteprocTitleError
        """
        citeproc['title'] = ''
        with pytest.raises(CiteprocTitleError):
            self.test_class._get_title(citeproc)

    @pytest.mark.parametrize('doi, expected', [('10.2195/spam', True), ('10.15122/spam', False)])
    def test_is_oa_by_doi(self, doi, expected):
        """
        Must be true or false
        """
        assert self.test_class._is_oa_by_doi(doi) == expected

    @pytest.mark.parametrize('licenses, expected', [([{'URL' : 'creativecommons.org/licenses/'}], True), ([{'URL' : 'https://dissem.in/not_free'}], False), ([{}], False), ([], False)])
    def test_is_oa_by_license(self, licenses, expected):
        """
        Must be true or false
        """
        assert self.test_class._is_oa_by_license(licenses) == expected

    @pytest.mark.parametrize('data, expected', [({'date-parts' : [[2019, 10, 10]]}, date(2019, 10, 10)), ({'raw' : '2019-10-10'}, date(2019, 10, 10)), (None, None), ({'spam' : 'ham'}, None)])
    def test_parse_date(self, data, expected):
        """
        Must return a valid date or None
        """
        assert self.test_class._parse_date(data) == expected

    @pytest.mark.parametrize('date_parts, expected', [([2019, ], date(2019, 1, 1)), ([2019, 10, ], date(2019, 10, 1)), ([2019, 10, 10], date(2019, 10, 10))])
    def test_parse_date_parts(self, date_parts, expected):
        """
        Must parse the date list
        """
        assert self.test_class._parse_date_parts(date_parts) == expected
class TestCrossRef(TestCiteproc):
    """
    This class groups tests about the CrossRef class.
    Inherits the whole Citeproc suite and overrides the citeproc fixture to
    match CrossRef's format (title and container-title are lists).
    """
    test_class = CrossRef
    @pytest.fixture
    def citeproc(self, container_title, title, citeproc):
        """
        In general, the CrossRef is identical to citeproc, but there are some differences.
        We change the fixture accordingly
        title is a list
        container-title is a list
        """
        citeproc['title'] = [title, ]
        citeproc['container-title'] = [container_title, ]
        return citeproc
    @pytest.mark.usefixtures('db')
    def test_fetch_latest_records(self, monkeypatch):
        """
        Essentially, we test if source date is updated
        """
        def ret_func(day):
            # Datetime objects are date objects
            assert isinstance(day, date)
            assert not isinstance(day, datetime)
            return None
        monkeypatch.setattr(self.test_class, '_fetch_day', ret_func)
        source = OaiSource.objects.get(identifier='crossref')
        # For some reason, last_update ist not 1970
        source.last_update = timezone.now() - timedelta(days=10)
        source.save()
        self.test_class.fetch_latest_records()
        source.refresh_from_db()
        assert source.last_update.date() == timezone.now().date() - timedelta(days=1)
    @responses.activate
    @pytest.mark.usefixtures('db')
    def test_fetch_batch(self):
        # Serve a canned CrossRef batch response and check that the DOIs are
        # passed as a filter and come back as Paper objects in order.
        dois = ['10.1016/j.gsd.2018.08.007', '10.1109/sYnAsc.2010.88']
        f_path = os.path.join(settings.BASE_DIR, 'backend', 'tests', 'data', 'crossref_batch.json')
        with open(f_path, 'r') as f:
            body = f.read()
        responses.add(
            responses.GET,
            url='https://api.crossref.org/works',
            body=body,
            status=200,
        )
        papers = self.test_class.fetch_batch(dois)
        called_url = responses.calls[0].request.url
        query = parse_qs(urlparse(called_url).query)
        query_f = query['filter'][0].split(',')
        for doi, filter_doi in zip(dois, query_f):
            assert doi == filter_doi.split(':')[1]
        for paper in papers:
            assert isinstance(paper, Paper)
        for paper, doi in zip(papers, dois):
            assert paper.get_doi() == doi.lower()
    @responses.activate
    @pytest.mark.usefixtures('db')
    def test_fetch_batch_doi_not_found(self):
        """
        If doi is not in result list, entry must be none
        """
        f_path = os.path.join(settings.BASE_DIR, 'backend', 'tests', 'data', 'crossref_batch.json')
        with open(f_path, 'r') as f:
            body = f.read()
        responses.add(
            responses.GET,
            url='https://api.crossref.org/works',
            body=body,
            status=200,
        )
        dois = ['10.1016/j.gsd.2018.08.007', '10.1109/sYnAsc.2010.88']
        doi_invalid = '10.spanish/inquisition'
        dois.append(doi_invalid)
        papers = self.test_class.fetch_batch(dois)
        assert papers[2] is None
    @responses.activate
    @pytest.mark.usefixtures('db')
    def test_fetch_batch_doi_with_comma(self):
        """
        If doi has comma, entry must be none
        """
        f_path = os.path.join(settings.BASE_DIR, 'backend', 'tests', 'data', 'crossref_batch.json')
        with open(f_path, 'r') as f:
            body = f.read()
        responses.add(
            responses.GET,
            url='https://api.crossref.org/works',
            body=body,
            status=200,
        )
        dois = ['10.1016/j.gsd.2018.08.007', '10.1109/sYnAsc.2010.88']
        doi_comma= '10.spanish,inquisition'
        dois.append(doi_comma)
        papers = self.test_class.fetch_batch(dois)
        assert papers[2] is None
    @pytest.mark.usefixtures('db', 'mock_crossref')
    def test_fetch_batch_doi_with_backslash(self):
        """
        CrossRef just drops backslash in search, so that such a DOI is not present in return list, while ingested correctly into the system
        """
        dois = [r'10.1007/978-3-319-66824-6\_35']
        r = self.test_class.fetch_batch(dois)
        assert isinstance(r[0], Paper)
    @pytest.mark.usefixtures('db')
    def test_fetch_day(self, rsps_fetch_day):
        """
        Here we imitate the CrossRef API in a very simple version.
        We mock the request and inspect the params.
        Then we return a result that we have gotten from CrossRef
        """
        self.test_class.rows = 30
        self.test_class.emit_status_every = 3
        day = date.today()
        self.test_class._fetch_day(day)
        # Some assertions
        called_url = rsps_fetch_day.calls[0].request.url
        query = parse_qs(urlparse(called_url).query)
        query_f = query['filter'][0].split(',')
        for date_filter in ['from-update-date', 'until-update-date']:
            assert date_filter + ':{}'.format(day) in query_f
        assert query['rows'][0] == str(self.test_class.rows)
        assert query['mailto'][0] == settings.CROSSREF_MAILTO
    @pytest.mark.usefixtures('db')
    def test_fetch_day_citeproc_error(self, monkeypatch, rsps_fetch_day):
        """
        If a CiteprocError raises, do not starve
        """
        def callback(*args, **kwargs):
            raise CiteprocError('Error')
        monkeypatch.setattr(self.test_class, 'to_paper', callback)
        day = date.today()
        self.test_class._fetch_day(day)
    @pytest.mark.usefixtures('db')
    def test_fetch_day_value_error(self, monkeypatch, rsps_fetch_day):
        """
        If a ValueError raises, do not starve
        """
        def callback(*args, **kwargs):
            raise ValueError('Error')
        monkeypatch.setattr(self.test_class, 'to_paper', callback)
        day = date.today()
        self.test_class._fetch_day(day)
    def test_filter_dois_by_comma(self):
        """
        Tests filtering of DOIs whether they have a ',' or not
        """
        doi = '10.a'
        doi_comma = '10.a,b'
        dois = [doi, doi_comma]
        assert self.test_class._filter_dois_by_comma(dois) == [doi]
    def test_get_title(self, citeproc):
        """
        CrossRef does serve the title in a list
        """
        r = self.test_class._get_title(citeproc)
        assert r == citeproc.get('title')[:1024]
        assert len(r) <= 1024
    def test_get_title_length(self, citeproc):
        """
        CrossRef does serve the title in a list. Must not be longer than 1024 chars
        """
        citeproc['title'] = ['x' * 2000, ]
        r = self.test_class._get_title(citeproc)
        assert r == citeproc.get('title')[:1024]
        assert len(r) <= 1024
    def test_get_title_list_error(self, citeproc):
        """
        CrossRef does serve the title in a list
        List must not be non-empty
        """
        citeproc['title'] = list()
        with pytest.raises(CiteprocTitleError):
            self.test_class._get_title(citeproc)
    def test_get_title_emtpy_string(self, citeproc):
        """
        If no title is found, expect CiteprocTitleError
        """
        citeproc['title'] = ['',]
        with pytest.raises(CiteprocTitleError):
            self.test_class._get_title(citeproc)
    def test_remove_unapproved_characters(self):
        """
        Must return only keep "a-z", "A-Z", "0-9" and "-._;()/"
        """
        assert self.test_class.remove_unapproved_characters(r'10.1007/978-3-319-66824-6\_35') == '10.1007/978-3-319-66824-6_35'
class TestDOI(TestCiteproc):
    """
    This class groups tests about the DOIResolver class.
    """
    test_class = DOIResolver

    @pytest.mark.usefixtures('db')
    @pytest.mark.parametrize('doi', ['10.1016/j.gsd.2018.08.007', '10.1109/sYnAsc.2010.88'])
    def test_save_doi(self, mock_doi, doi):
        """
        Must save the paper
        """
        p = self.test_class.save_doi(doi)
        # Header must be set
        assert mock_doi.calls[0].request.headers.get('Accept') == 'application/citeproc+json'
        # Check if paper is created
        assert p.pk >= 1
        r = OaiRecord.objects.get(about=p)
        # Bug fix: ``is not ''`` was an identity comparison that is
        # effectively always true; use inequality to assert non-emptiness.
        assert r.journal_title != ''
        assert r.publisher_name != ''

    @pytest.mark.usefixtures('db')
    def test_save_doi_existing(self, mock_doi):
        """
        If DOI is already in system, expect not a new paper, but the one from the database
        """
        doi = '10.1016/j.gsd.2018.08.007'
        p = self.test_class.save_doi(doi)
        q = self.test_class.save_doi(doi)
        assert p == q
| wetneb/dissemin | backend/tests/test_citeproc.py | Python | agpl-3.0 | 26,785 | 0.003136 |
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Runs the main function in detokenize.py."""
from pw_tokenizer import detokenize
detokenize.main()
| google/pigweed | pw_tokenizer/py/pw_tokenizer/__main__.py | Python | apache-2.0 | 687 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the OtherAuthor model, building on migration 0002_second."""

    dependencies = [("migrations", "0002_second")]

    operations = [
        migrations.CreateModel(
            "OtherAuthor",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=255)),
                ("slug", models.SlugField(null=True)),
                ("age", models.IntegerField(default=0)),
                ("silly_field", models.BooleanField(default=False)),
            ],
        ),
    ]
| DONIKAN/django | tests/migrations2/test_migrations_2/0001_initial.py | Python | bsd-3-clause | 627 | 0 |
#!/usr/bin/env python3
import os, sys, signal, argparse, configparser, traceback, time
from contextlib import closing
from ananas import PineappleBot
import ananas.default
# Add the cwd to the module search path so that we can load user bot classes
sys.path.append(os.getcwd())
# All bot instances constructed from the config file; shared with the
# signal handler below so they can be shut down cleanly on exit.
bots = []

def shutdown_all(signum, frame):
    """Signal handler: shut down every running bot, then exit the process."""
    for bot in bots:
        if bot.state == PineappleBot.RUNNING: bot.shutdown()
    sys.exit("Shutdown complete")
def main():
    """Parse the command line, instantiate every bot described in the given
    config file, install signal handlers, and sleep until terminated."""
    parser = argparse.ArgumentParser(description="Pineapple command line interface.", prog="ananas")
    parser.add_argument("config", help="A cfg file to read bot configuration from.")
    parser.add_argument("-v", "--verbose", action="store_true", help="Log more extensive messages for e.g. debugging purposes.")
    parser.add_argument("-i", "--interactive", action="store_true", help="Use interactive prompts for e.g. mastodon login")
    args = parser.parse_args()
    prog = sys.argv[0]

    cfg = configparser.ConfigParser()
    try: cfg.read(args.config)
    except FileNotFoundError:
        sys.exit("Couldn't open '{}', exiting.".format(args.config))

    import importlib  # local import: only needed for dynamic bot loading

    for bot in cfg:
        if bot == "DEFAULT": continue
        if not "class" in cfg[bot]:
            print("{}: no class specified, skipping {}.".format(prog, bot))
            continue
        botclass = cfg[bot]["class"]
        module, _, botclass = botclass.rpartition(".")
        if module == "":
            print("{}: no module given in class name '{}', skipping {}.".format(prog, botclass, bot))
            # Bug fix: previously fell through and attempted the import with
            # an empty module name despite announcing the skip.
            continue
        try:
            # Import the bot class dynamically via importlib instead of
            # building an exec() string, avoiding quoting/injection issues
            # with values taken from the config file.
            botmod = importlib.import_module(module)
            botcls = getattr(botmod, botclass)
            bots.append(botcls(args.config, name=bot,
                               interactive=args.interactive, verbose=args.verbose))
        except ModuleNotFoundError as e:
            print("{}: encountered the following error loading module {}:".format(prog, module))
            print("{}: the error was: {}".format(prog, e))
            print("{}: skipping {}!".format(prog, bot))
            continue
        except Exception as e:
            # Covers AttributeError (class missing from module) and any
            # exception raised by the bot's constructor.
            print("{}: fatal exception loading bot {}: {}\n{}".format(prog, bot, repr(e), traceback.format_exc()))
            continue
        except KeyboardInterrupt:
            sys.exit()

    # Shut bots down cleanly on interrupt/abort/terminate.
    signal.signal(signal.SIGINT, shutdown_all)
    signal.signal(signal.SIGABRT, shutdown_all)
    signal.signal(signal.SIGTERM, shutdown_all)

    # Bots run on their own threads/schedules; just keep the process alive.
    try:
        while(True): time.sleep(60)
    except KeyboardInterrupt:
        shutdown_all(None, None)

if __name__ == "__main__":
    main()
| Chronister/ananas | ananas/run.py | Python | mit | 2,535 | 0.007495 |
#!/usr/bin/env python
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.eth import GmiiFrame, GmiiPhy
from cocotbext.axi import AxiStreamBus, AxiStreamSource, AxiStreamSink
class TB:
    """Testbench wrapper: starts the clocks, attaches a GMII PHY model and
    AXI-stream source/sink to the DUT, and provides a reset helper."""
    def __init__(self, dut, speed=1000e6):
        self.dut = dut
        self.log = logging.getLogger("cocotb.tb")
        self.log.setLevel(logging.DEBUG)
        # 125 MHz (8 ns) clocks for the MAC gigabit transmit clock and the
        # user-logic side.
        cocotb.start_soon(Clock(dut.gtx_clk, 8, units="ns").start())
        cocotb.start_soon(Clock(dut.logic_clk, 8, units="ns").start())
        # GMII PHY model drives/receives the DUT's GMII pins at the given speed.
        self.gmii_phy = GmiiPhy(dut.gmii_txd, dut.gmii_tx_er, dut.gmii_tx_en, dut.mii_tx_clk, dut.gmii_tx_clk,
                                dut.gmii_rxd, dut.gmii_rx_er, dut.gmii_rx_dv, dut.gmii_rx_clk, speed=speed)
        # AXI-stream endpoints on the user-logic side of the MAC+FIFO.
        self.axis_source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "tx_axis"), dut.logic_clk, dut.logic_rst)
        self.axis_sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "rx_axis"), dut.logic_clk, dut.logic_rst)
        dut.ifg_delay.setimmediatevalue(0)
    async def reset(self):
        """Pulse both resets for two tx_clk cycles, then release them."""
        self.dut.gtx_rst.setimmediatevalue(0)
        self.dut.logic_rst.setimmediatevalue(0)
        await RisingEdge(self.dut.tx_clk)
        await RisingEdge(self.dut.tx_clk)
        self.dut.gtx_rst <= 1
        self.dut.logic_rst <= 1
        await RisingEdge(self.dut.tx_clk)
        await RisingEdge(self.dut.tx_clk)
        self.dut.gtx_rst <= 0
        self.dut.logic_rst <= 0
        await RisingEdge(self.dut.tx_clk)
        await RisingEdge(self.dut.tx_clk)
    def set_speed(self, speed):
        # No runtime speed switching needed for this DUT; hook kept so tests
        # can call it uniformly across testbench variants.
        pass
async def run_test_rx(dut, payload_lengths=None, payload_data=None, ifg=12, speed=1000e6):
    """Send frames into the DUT through the GMII PHY model and check that
    they come out of the AXI-stream sink intact and error-free."""
    tb = TB(dut, speed)
    tb.gmii_phy.rx.ifg = ifg
    tb.dut.ifg_delay <= ifg
    tb.set_speed(speed)
    await tb.reset()
    # Give the MAC time to detect the link speed before checking it.
    for k in range(100):
        await RisingEdge(dut.rx_clk)
    if speed == 10e6:
        assert dut.speed == 0
    elif speed == 100e6:
        assert dut.speed == 1
    else:
        assert dut.speed == 2
    test_frames = [payload_data(x) for x in payload_lengths()]
    for test_data in test_frames:
        test_frame = GmiiFrame.from_payload(test_data)
        await tb.gmii_phy.rx.send(test_frame)
    for test_data in test_frames:
        rx_frame = await tb.axis_sink.recv()
        assert rx_frame.tdata == test_data
        assert rx_frame.tuser == 0  # no error flagged
    assert tb.axis_sink.empty()
    await RisingEdge(dut.rx_clk)
    await RisingEdge(dut.rx_clk)
async def run_test_tx(dut, payload_lengths=None, payload_data=None, ifg=12, speed=1000e6):
    """Send payloads into the AXI-stream source and check the frames the MAC transmits on GMII."""
    tb = TB(dut, speed)
    tb.gmii_phy.rx.ifg = ifg
    tb.dut.ifg_delay <= ifg
    tb.set_speed(speed)
    await tb.reset()
    # let the MAC's speed detection settle, then check the reported speed code
    for k in range(100):
        await RisingEdge(dut.rx_clk)
    if speed == 10e6:
        assert dut.speed == 0
    elif speed == 100e6:
        assert dut.speed == 1
    else:
        assert dut.speed == 2
    test_frames = [payload_data(x) for x in payload_lengths()]
    for test_data in test_frames:
        await tb.axis_source.send(test_data)
    # each transmitted frame must carry the payload, a valid FCS, and no error flags
    for test_data in test_frames:
        rx_frame = await tb.gmii_phy.tx.recv()
        assert rx_frame.get_payload() == test_data
        assert rx_frame.check_fcs()
        assert rx_frame.error is None
    assert tb.gmii_phy.tx.empty()
    await RisingEdge(dut.tx_clk)
    await RisingEdge(dut.tx_clk)
def size_list():
    """Payload lengths to sweep: every size from 60 to 127, two large frames,
    and ten repeats of the 60-byte minimum."""
    sizes = [length for length in range(60, 128)]
    sizes.extend([512, 1514])
    sizes.extend([60] * 10)
    return sizes
def incrementing_payload(length):
    """Return *length* bytes counting 0x00..0xFF and wrapping around."""
    return bytearray(value % 256 for value in range(length))
def cycle_en():
    """Endless clock-enable pattern: asserted one cycle out of every four."""
    return itertools.cycle((0, 0, 0, 1))
# When running under a simulator, expand each test coroutine into a matrix of
# parameterized cocotb tests (payload sizes x inter-frame gap x line speed).
if cocotb.SIM_NAME:
    for test in [run_test_rx, run_test_tx]:
        factory = TestFactory(test)
        factory.add_option("payload_lengths", [size_list])
        factory.add_option("payload_data", [incrementing_payload])
        factory.add_option("ifg", [12])
        factory.add_option("speed", [1000e6, 100e6, 10e6])
        factory.generate_tests()
# cocotb-test
# Directory layout used to locate the RTL sources relative to this test file.
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
def test_eth_mac_1g_gmii_fifo(request):
    """pytest entry point: compile the RTL and run the cocotb simulation via cocotb-test."""
    dut = "eth_mac_1g_gmii_fifo"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = dut
    # DUT plus every RTL module it instantiates (MAC, GMII PHY interface, FIFOs)
    verilog_sources = [
        os.path.join(rtl_dir, f"{dut}.v"),
        os.path.join(rtl_dir, "eth_mac_1g_gmii.v"),
        os.path.join(rtl_dir, "gmii_phy_if.v"),
        os.path.join(rtl_dir, "ssio_sdr_in.v"),
        os.path.join(rtl_dir, "ssio_sdr_out.v"),
        os.path.join(rtl_dir, "oddr.v"),
        os.path.join(rtl_dir, "eth_mac_1g.v"),
        os.path.join(rtl_dir, "axis_gmii_rx.v"),
        os.path.join(rtl_dir, "axis_gmii_tx.v"),
        os.path.join(rtl_dir, "lfsr.v"),
        os.path.join(axis_rtl_dir, "axis_adapter.v"),
        os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
        os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
    ]
    # HDL parameters; derived entries mirror the RTL's own defaults
    parameters = {}
    parameters['AXIS_DATA_WIDTH'] = 8
    parameters['AXIS_KEEP_ENABLE'] = int(parameters['AXIS_DATA_WIDTH'] > 8)
    parameters['AXIS_KEEP_WIDTH'] = parameters['AXIS_DATA_WIDTH'] // 8
    parameters['ENABLE_PADDING'] = 1
    parameters['MIN_FRAME_LENGTH'] = 64
    parameters['TX_FIFO_DEPTH'] = 16384
    parameters['TX_FRAME_FIFO'] = 1
    parameters['TX_DROP_OVERSIZE_FRAME'] = parameters['TX_FRAME_FIFO']
    parameters['TX_DROP_BAD_FRAME'] = parameters['TX_DROP_OVERSIZE_FRAME']
    parameters['TX_DROP_WHEN_FULL'] = 0
    parameters['RX_FIFO_DEPTH'] = 16384
    parameters['RX_FRAME_FIFO'] = 1
    parameters['RX_DROP_OVERSIZE_FRAME'] = parameters['RX_FRAME_FIFO']
    parameters['RX_DROP_BAD_FRAME'] = parameters['RX_DROP_OVERSIZE_FRAME']
    parameters['RX_DROP_WHEN_FULL'] = parameters['RX_DROP_OVERSIZE_FRAME']
    # expose the HDL parameters to the testbench process as PARAM_* env vars
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
    # one sim_build directory per parameterized pytest case
    sim_build = os.path.join(tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))
    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
| alexforencich/verilog-ethernet | tb/eth_mac_1g_gmii_fifo/test_eth_mac_1g_gmii_fifo.py | Python | mit | 7,506 | 0.001066 |
# -*- coding: utf-8 -*-
from celery import Celery
from server import config
""" Celery configuration module.
"""
def make_celery(app):
    """ Flask integration with celery. Taken from
        http://flask.pocoo.org/docs/0.12/patterns/celery/
    """
    celery_app = Celery(
        app.import_name,
        backend=config.CELERY_RESULT_BACKEND,
        broker=config.CELERY_BROKER_URL,
    )
    celery_app.conf.update(app.config)
    base_task = celery_app.Task

    class ContextTask(base_task):
        # Task subclass that executes inside the Flask application context.
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return base_task.__call__(self, *args, **kwargs)

    celery_app.Task = ContextTask
    return celery_app
| abhishekpathak/recommendation-system | recommender/server/settings/celery_conf.py | Python | mit | 695 | 0.001439 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-15 15:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Convert Silo.workflowlevel1 from a ForeignKey to a ManyToManyField
    (drop + re-add), and pin on_delete=CASCADE on related FKs.
    """

    dependencies = [
        ('silo', '0029_auto_20170915_0810'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='silo',
            name='workflowlevel1',
        ),
        migrations.AddField(
            model_name='silo',
            name='workflowlevel1',
            # NOTE(review): null=True has no effect on ManyToManyField (Django
            # ignores it); kept verbatim to preserve the historical migration state.
            field=models.ManyToManyField(blank=True, null=True, to='silo.WorkflowLevel1'),
        ),
        migrations.AlterField(
            model_name='tolauser',
            name='workflowlevel1',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
        ),
        migrations.AlterField(
            model_name='workflowlevel2',
            name='workflowlevel1',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
        ),
    ]
| mercycorps/TolaTables | silo/migrations/0030_auto_20170915_0828.py | Python | gpl-2.0 | 1,103 | 0.00272 |
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from urlparse import urljoin
from django.http import HttpResponse
from django.template import Context
from edxmako import lookup_template
from edxmako.request_context import get_template_request_context
from django.conf import settings
from django.core.urlresolvers import reverse
from openedx.core.djangoapps.theming.helpers import get_template_path, is_request_in_themed_site
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
def marketing_link(name):
    """Resolve *name* to either the marketing-site URL or the legacy
    Django-served equivalent, depending on the ENABLE_MKTG_SITE setting.

    Returns '#' when the name cannot be resolved (unknown name, or a
    deliberately disabled legacy link).
    """
    # link_map maps marketing-site link names to their old Django URL names
    link_map = settings.MKTG_URL_LINK_MAP
    enable_mktg_site = configuration_helpers.get_value(
        'ENABLE_MKTG_SITE',
        settings.FEATURES.get('ENABLE_MKTG_SITE', False)
    )
    marketing_urls = configuration_helpers.get_value(
        'MKTG_URLS',
        settings.MKTG_URLS
    )
    if enable_mktg_site and name in marketing_urls:
        root_url = marketing_urls.get('ROOT')
        # special case: caller only wants the marketing site root
        if name == 'ROOT':
            return root_url
        # urljoin lets individual entries be absolute URLs that override the
        # configured ROOT, while relative entries are joined onto it
        return urljoin(root_url, marketing_urls.get(name))
    elif not enable_mktg_site and name in link_map:
        # marketing site is off: serve the old Django page unless disabled
        if link_map[name] is not None:
            return reverse(link_map[name])
    else:
        log.debug("Cannot find corresponding link for name: %s", name)
    return '#'
def is_any_marketing_link_set(names):
    """Return True when at least one of the given marketing links is configured."""
    for link_name in names:
        if is_marketing_link_set(link_name):
            return True
    return False
def is_marketing_link_set(name):
    """Return True when the named marketing link is configured for the
    active mode (marketing site vs. legacy Django pages)."""
    enable_mktg_site = configuration_helpers.get_value(
        'ENABLE_MKTG_SITE',
        settings.FEATURES.get('ENABLE_MKTG_SITE', False)
    )
    marketing_urls = configuration_helpers.get_value(
        'MKTG_URLS',
        settings.MKTG_URLS
    )
    # which mapping counts as "configured" depends on the mode
    return name in marketing_urls if enable_mktg_site else name in settings.MKTG_URL_LINK_MAP
def marketing_link_context_processor(request):
    """Django context processor exposing every marketing URL to templates.

    Produces one entry per known link name, keyed 'MKTG_URL_<NAME>' and
    valued by :func:`marketing_link`.
    """
    marketing_urls = configuration_helpers.get_value(
        'MKTG_URLS',
        settings.MKTG_URLS
    )
    # union of the legacy link names and the configured marketing URL names
    link_names = settings.MKTG_URL_LINK_MAP.viewkeys() | marketing_urls.viewkeys()
    return {"MKTG_URL_" + link_name: marketing_link(link_name) for link_name in link_names}
def footer_context_processor(request):  # pylint: disable=unused-argument
    """Expose whether the current request targets a themed (micro)site so
    templates can choose between the edX.org footer and the Open Source one."""
    return {"IS_REQUEST_IN_MICROSITE": is_request_in_themed_site()}
def render_to_string(template_name, dictionary, context=None, namespace='main', request=None):
    """
    Render a Mako template to as a string.

    The following values are available to all templates:
        settings: the django settings object
        EDX_ROOT_URL: settings.EDX_ROOT_URL
        marketing_link: The :func:`marketing_link` function
        is_any_marketing_link_set: The :func:`is_any_marketing_link_set` function
        is_marketing_link_set: The :func:`is_marketing_link_set` function

    Arguments:
        template_name: The name of the template to render. Will be loaded
            from the template paths specified in configuration.
        dictionary: A dictionary of variables to insert into the template during
            rendering.
        context: A :class:`~django.template.Context` with values to make
            available to the template.
        namespace: The Mako namespace to find the named template in.
        request: The request to use to construct the RequestContext for rendering
            this template. If not supplied, the current request will be used.
    """
    # resolve theming overrides before template lookup
    template_name = get_template_path(template_name)
    context_instance = Context(dictionary)
    # add dictionary to context_instance
    context_instance.update(dictionary or {})
    # collapse context_instance to a single dictionary for mako
    context_dictionary = {}
    context_instance['settings'] = settings
    context_instance['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
    context_instance['marketing_link'] = marketing_link
    context_instance['is_any_marketing_link_set'] = is_any_marketing_link_set
    context_instance['is_marketing_link_set'] = is_marketing_link_set
    # In various testing contexts, there might not be a current request context.
    request_context = get_template_request_context(request)
    if request_context:
        # Django Context objects iterate as a stack of dicts; merge the
        # request-context layers first so the explicit values above win.
        for item in request_context:
            context_dictionary.update(item)
    for item in context_instance:
        context_dictionary.update(item)
    if context:
        context_dictionary.update(context)
    # "Fix" CSRF token by evaluating the lazy object
    KEY_CSRF_TOKENS = ('csrf_token', 'csrf')
    for key in KEY_CSRF_TOKENS:
        if key in context_dictionary:
            context_dictionary[key] = unicode(context_dictionary[key])
    # fetch and render template
    template = lookup_template(namespace, template_name)
    return template.render_unicode(**context_dictionary)
def render_to_response(template_name, dictionary=None, context_instance=None, namespace='main', request=None, **kwargs):
    """Render the named Mako template and wrap the result in an HttpResponse.

    Any extra keyword arguments are forwarded to the HttpResponse constructor.
    """
    body = render_to_string(template_name, dictionary or {}, context_instance, namespace, request)
    return HttpResponse(body, **kwargs)
| synergeticsedx/deployment-wipro | common/djangoapps/edxmako/shortcuts.py | Python | agpl-3.0 | 7,383 | 0.00149 |
import random
import re
import vsphere_inventory as vsphere
from os.path import join, dirname
try:
import json
except ImportError:
import simplejson as json
def readNamesFrom(filepath):
    """Return every line of *filepath* (trailing newlines preserved)."""
    with open(filepath) as name_file:
        return list(name_file)
def randomName(lefts, rights):
    """Build a "left-right" name from one random entry of each word list,
    stripping trailing whitespace (the lists come straight from readlines)."""
    parts = (random.choice(lefts).rstrip(), random.choice(rights).rstrip())
    return "-".join(parts)
def nodeExists(knownNames, name):
    """Return True when *name* is already taken by a known node.

    A name collides when a known hostname equals it exactly or starts with it
    followed by a dot (e.g. "foo" collides with "foo.example.com").  The
    candidate is passed through re.escape so names containing regex
    metacharacters are compared literally instead of being misinterpreted
    as a pattern.
    """
    pattern = re.escape(name) + r'(\.|$)'
    matches = [n for n in knownNames if re.match(pattern, n)]
    return len(matches) > 0
def generateName(knownNames):
    """Pick a random "left-right" name that is not already in *knownNames*.

    Word lists are read from names/lefts.txt and names/rights.txt next to
    this module.  Up to 10 random combinations are tried; on success the
    name is returned, otherwise the process exits with status 2.
    """
    leftSides = readNamesFrom(join(dirname(__file__), 'names', 'lefts.txt'))
    rightSides = readNamesFrom(join(dirname(__file__), 'names', 'rights.txt'))
    for i in range(10):
        name = randomName(leftSides, rightSides)
        if not nodeExists(knownNames, name):
            return name
    else:
        # for/else: reached only when the loop exhausts all 10 attempts
        # without returning (the loop contains no break)
        print('Failed to generate a new, unique, name after 10 attempts')
        exit(2)
if __name__ == '__main__':
    # Connect to vSphere, collect the hostnames of all VMs under the given
    # inventory path, and print a freshly generated name that avoids them.
    parser = vsphere.argparser()
    args = parser.parse_args()
    vs = vsphere.vsphereConnect(args.server, args.user, args.password)
    vimSession = vsphere.vimLogin(vs)
    vms = vsphere.vmsAtPath(vs, vimSession, args.path)
    vmList = [vm['hostname'] for vm in vms]
    newName = generateName(vmList)
    print(newName)
| lymingtonprecision/maat | ansible/name_generator.py | Python | mit | 1,321 | 0.004542 |
# -*- coding: utf-8 -*-
#
# This file is part of PyGaze - the open-source toolbox for eye tracking
#
# PyGaze is a Python module for easily creating gaze contingent experiments
# or other software (as well as non-gaze contingent experiments/software)
# Copyright (C) 2012-2013 Edwin S. Dalmaijer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from pygaze.sound import Sound
| esdalmaijer/PyGaze | pygaze/libsound.py | Python | gpl-3.0 | 1,025 | 0 |
# Autogenerated with SMOP version 0.23
# main.py ../../assessing-mininet/MATLAB/load_function.m ../../assessing-mininet/MATLAB/process_complete_test_set.m ../../assessing-mininet/MATLAB/process_single_testfile.m ../../assessing-mininet/MATLAB/ProcessAllLogsMain.m
from __future__ import division
from numpy import arange
def strcat(*args):
    """MATLAB-style strcat shim: concatenate every string argument in order."""
    pieces = [piece for piece in args]
    return "".join(pieces)
def load_octave_decoded_file_as_matrix(file_name):
    """Parse an Octave-decoded log file into a list of float rows.

    Each line is stripped and split on single spaces, and every token is
    converted to float.  A nested list comprehension is used instead of
    map() so each row is a real, subscriptable list under Python 3 as well
    (map() there returns a lazy iterator, which would break the downstream
    matrix[i][j] indexing); behavior under Python 2 is unchanged.
    """
    with open(file_name, 'r') as f:
        return [[float(token) for token in line.strip().split(' ')] for line in f]
def get_test_bitrate(crosstraffic):
    """Bitrates (Mbps) swept by a test run: a lower band when cross traffic
    is present, a higher band otherwise."""
    start, stop, step = (4, 6, 0.25) if crosstraffic else (8, 12, 0.5)
    return arange(start, stop, step)
def process_complete_test_set(file_names,output_format,crosstraffic):
    """Process every decoded log file and plot aggregate mean/std-dev statistics.

    NOTE(review): this is raw SMOP (MATLAB-to-Python) output and is not
    runnable as-is -- it references names never defined in this module
    (ii, mean, std, cat, sort, round_, bitrate_interval, subplot, errorbar,
    title, xlabel, ylabel, axis, grid, figure, set_, saveas, close, clear).
    Kept verbatim; confirm intended behavior against the original MATLAB
    sources before fixing.
    """
    from glob import glob
    overview_img_file=strcat('overview.',output_format)
    # per-file aggregate statistics, one entry per processed log
    mean_bitrate=[]
    std_dev_bitrate=[]
    mean_delay=[]
    std_dev_delay=[]
    mean_jitter=[]
    std_dev_jitter=[]
    mean_packetloss=[]
    std_dev_packetloss=[]
    print('Starting work on:')
    print(file_names)
    for f in file_names:
        print('in loop, iterating through list of found files...')
        #current_file_name_with_ext=f
        #bare_file_name=strrep(current_file_name_with_ext,extension_loadfile,'')
        #temp_picture_file_name=strcat(bare_file_name,extension_imgfile)
        current_picture_file_name=strcat(f,'.jpg')
        matrix_to_process=load_octave_decoded_file_as_matrix(f)
        parsed_data=process_single_testfile(matrix_to_process,current_picture_file_name,output_format)
        # NOTE(review): 'ii' is never defined/incremented, and plain Python
        # lists do not grow on indexed assignment -- this would raise at runtime.
        mean_bitrate[ii]=mean(parsed_data)
        std_dev_bitrate[ii]=std(parsed_data)
        mean_delay[ii]=mean(parsed_data[:,2])
        std_dev_delay[ii]=std(parsed_data[:,2])
        mean_jitter[ii]=mean(parsed_data[:,3])
        std_dev_jitter[ii]=std(parsed_data[:,3])
        mean_packetloss[ii]=mean(parsed_data[:,4])
        std_dev_packetloss[ii]=std(parsed_data[:,4])
    bitrate_of_test = get_test_bitrate(crosstraffic)
    # axis limits for the three overview subplots
    s_bitrate=min(bitrate_of_test) - bitrate_interval
    e_bitrate=max(bitrate_of_test) + bitrate_interval
    s_mean_bitrate=min(mean_bitrate) - max(std_dev_bitrate)
    e_mean_bitrate=max(mean_bitrate) + max(std_dev_bitrate)
    s_mean_jitter=min(mean_jitter) - max(std_dev_jitter)
    e_mean_jitter=max(mean_jitter) + max(std_dev_jitter)
    s_mean_delay=min(mean_delay) - max(std_dev_delay)
    e_mean_delay=max(mean_delay) + max(std_dev_delay)
    axis_bitrate=(cat(s_bitrate,e_bitrate,s_mean_bitrate,e_mean_bitrate))
    axis_delay=(cat(s_bitrate,e_bitrate,sort(cat(round_(s_mean_delay) - 1,round_(e_mean_delay) + 1))))
    axis_jitter=(cat(s_bitrate,e_bitrate,s_mean_jitter,e_mean_jitter))
    print('\n\n\n*** START TESTDATA ***\n')
    print(bitrate_of_test)
    print(mean_bitrate)
    print(std_dev_bitrate)
    print('\n*** END TESTDATA ***\n\n\n')
    # throughput subplot
    subplot(3,1,1)
    print(len(bitrate_of_test))
    print(len(mean_bitrate))
    print(len(std_dev_bitrate))
    errorbar(bitrate_of_test,mean_bitrate,std_dev_bitrate,'kx')
    title('mean throughput with standard deviation')
    xlabel('test bitrate [Mbps]')
    ylabel('bitrate value [Mbps]')
    print(axis_bitrate)
    axis(axis_bitrate)
    grid('on')
    # delay subplot
    subplot(3,1,2)
    errorbar(bitrate_of_test,mean_delay,std_dev_delay,'kx')
    title('mean delay with standard deviation')
    xlabel('test bitrate [Mbps]')
    ylabel('delay value [ms]')
    axis(axis_delay)
    grid('on')
    # jitter subplot
    subplot(3,1,3)
    errorbar(bitrate_of_test,mean_jitter,std_dev_jitter,'kx')
    title('mean jitter with standard deviation')
    xlabel('test bitrate [Mbps]')
    ylabel('jitter value [ms]')
    axis(axis_jitter)
    grid('on')
    # write the aggregated overview figure to disk
    aggregatedPicture=figure(1)
    set_(aggregatedPicture,'PaperUnits','centimeters')
    set_(aggregatedPicture,'PaperSize',cat(30,16))
    set_(aggregatedPicture,'PaperPosition',cat(0,0,30,16))
    set_(aggregatedPicture,'PaperOrientation','portrait')
    saveas(aggregatedPicture,overview_img_file,output_format)
    close(aggregatedPicture)
    clear('all')
    return
def process_single_testfile(matrix,current_picture_file_name,output_format):
    """Compute per-second bitrate/delay/jitter/packet-loss from one decoded
    log matrix, plot them, and return the aggregated columns.

    NOTE(review): raw SMOP (MATLAB-to-Python) output; relies on names that
    are undefined in pure Python (mean, cat, copy, diff, hist, matlabarray,
    plot/subplot/figure helpers, round_) and on MATLAB-style indexed list
    growth (bitrate[jj]=...).  Kept verbatim.
    """
    # receive timestamps (columns 5..7 = h/m/s), relative to the first packet
    t_start=matrix[1][5] * 3600 + matrix[1][6] * 60 + matrix[1][7]
    print (matrix[:][5] * 3600 + matrix[:][6] * 60 + matrix[:][7])
    t_conv=(matrix[:][5] * 3600 + matrix[:][6] * 60 + matrix[:][7]) - t_start
    # send timestamps (columns 2..4), relative to the first packet
    t_start_s=matrix[1][2] * 3600 + matrix[1][3] * 60 + matrix[1][4]
    t_conv_s=(matrix[:][2] * 3600 + matrix[:][3] * 60 + matrix[:][4]) - t_start_s
    jj=1
    t_int=0
    bitrate[jj]=0
    delay[jj]=0
    jitter[jj]=0
    pktloss[jj]=0
    # bucket samples into one-second intervals, averaging within each bucket
    for ii in arange(1,len(matrix)).reshape(-1):
        if (t_conv[ii] - t_int >= 1):
            # a new one-second bucket starts at this sample
            jj=jj + 1
            t_int=t_conv[ii]
            bitrate[jj]=matrix[ii][8]
            delay[jj]=t_conv[ii] - t_conv_s[ii]
            if (ii > 1):
                pktloss[jj]=matrix[ii] - matrix[ii - 1] - 1
                jitter[jj]=t_conv[ii] - t_conv[ii - 1]
        else:
            # still inside the current bucket: accumulate / running-average
            bitrate[jj]=bitrate[jj] + matrix[ii][8]
            delay[jj]=mean(cat(delay[jj],(t_conv[ii] - t_conv_s[ii])))
            if (ii > 1):
                pktloss[jj]=pktloss[jj] + matrix[ii] - matrix[ii - 1] - 1
                jitter[jj]=mean(cat(jitter[jj],(t_conv[ii] - t_conv[ii - 1])))
    # bytes/s -> Mbps
    bitrate=bitrate / 125000
    return_matrix=matlabarray(cat(bitrate.T,delay.T,jitter.T,pktloss.T))
    subplot(2,2,1)
    bitrate_u=copy(bitrate)
    plot(arange(0,jj - 2),bitrate_u[1:jj - 1],'-')
    title('Throughput')
    xlabel('time [s]')
    ylabel('[Mbps]')
    axis(cat(0,max(t_conv),0,round_(max(bitrate_u) * 1.125)))
    grid('on')
    subplot(2,2,2)
    plot(arange(0,len(delay) - 1),delay,'-')
    title('Delay')
    xlabel('time [s]')
    ylabel('[ms]')
    axis(cat(0,max(t_conv),min(delay) - 1e-05,max(delay)))
    grid('on')
    subplot(2,2,3)
    plot(arange(0,len(jitter) - 1),jitter,'-')
    title('Jitter')
    xlabel('time [s]')
    ylabel('[ms]')
    axis(cat(0,max(t_conv),min(jitter) - max(jitter) * 1.125,max(jitter) * 1.125))
    grid('on')
    subplot(2,2,4)
    d=diff(t_conv)
    m=max(d)
    hist(d)
    title('Inter-departure time Distribution')
    xlabel('time [s]')
    ylabel('Empirical PDF')
    grid('on')
    # write the four-panel per-file figure to disk
    firstPicture=figure(1)
    set_(firstPicture,'PaperUnits','centimeters')
    set_(firstPicture,'PaperSize',cat(22,18))
    set_(firstPicture,'PaperPosition',cat(0,0,22,18))
    set_(firstPicture,'PaperOrientation','portrait')
    saveas(firstPicture,current_picture_file_name,output_format)
    close(firstPicture)
    # if (strcmp(log_type,'udp_rcv')):
    # subplot(1,1,1)
    # packetloss_picture=figure(1)
    # set_(packetloss_picture,'PaperUnits','centimeters')
    # set_(packetloss_picture,'PaperSize',cat(12,10))
    # set_(packetloss_picture,'PaperPosition',cat(0,0,12,10))
    # set_(packetloss_picture,'PaperOrientation','portrait')
    # plot(arange(0,len(pktloss) - 1),pktloss,'-')
    # title('Packet loss')
    # xlabel('time [s]')
    # ylabel('[pps]')
    # axis(cat(sort(cat(0,max(t_conv))),sort(cat(round_(max(pktloss)) + 1,round_(min(pktloss)) - 1))))
    # grid('on')
    # saveas(packetloss_picture,strcat('pl_',current_picture_file_name),output_format)
    # close(packetloss_picture)
    return return_matrix
# Script entry: decode a single pre-converted Octave log and render its plots.
crosstraffic = False
#process_complete_test_set(['/tmp/octave.dat'],'pdf',crosstraffic)
process_single_testfile(load_octave_decoded_file_as_matrix('/tmp/octave.dat'),'pic.jpg',"jpg")
| yossisolomon/assessing-mininet | matlab-to-python.py | Python | gpl-2.0 | 7,459 | 0.024668 |
"""
Plugin for ResolveURL
Copyright (C) 2020 gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from resolveurl.plugins.__resolve_generic__ import ResolveGeneric
from resolveurl.plugins.lib import helpers
class OoglyResolver(ResolveGeneric):
    """ResolveURL plugin that extracts direct video links from oogly.io embed pages."""
    name = "oogly.io"
    domains = ['oogly.io']
    # matches both plain and "embed-" style URLs; group 2 captures the media id
    pattern = r'(?://|\.)(oogly\.io)/(?:embed-)?([0-9a-zA-Z]+)'

    def get_media_url(self, host, media_id):
        # Scrape the embed page only for an explicit jwplayer 'file:' entry
        # (m3u8 or mp4); generic fallback patterns are disabled on purpose.
        return helpers.get_media_url(self.get_url(host, media_id),
                                     patterns=[r'''file:\s*"(?P<url>[^"]+\.(?:m3u8|mp4))"'''],
                                     generic_patterns=False,
                                     referer=False)

    def get_url(self, host, media_id):
        # canonical embed page URL for this host/media id
        return self._default_get_url(host, media_id, template='https://{host}/embed-{media_id}.html')
| dknlght/dkodi | src/script.module.resolveurl/lib/resolveurl/plugins/oogly.py | Python | gpl-2.0 | 1,391 | 0.001438 |
from audio_pipeline.util import Tag
import re
from audio_pipeline.util import Exceptions
class BaseTag(Tag.Tag):
    """Single-valued Vorbis comment field (mutagen stores every value as a list)."""
    def extract(self):
        # pull the raw list from mutagen, then keep only the first element
        super().extract()
        if self._value is not None:
            self._value = self._value[0]
    def set(self, value=Tag.CurrentTag):
        # Write the current (or given) value back into the mutagen mapping.
        if value is not Tag.CurrentTag:
            self.value = value
        if isinstance(self._value, list):
            self.mutagen[self.serialization_name] = [str(val) for val in self._value]
        elif self._value:
            self.mutagen[self.serialization_name] = [str(self._value)]
        else:
            # falsy scalar (None, "", 0) removes the field entirely
            if self.serialization_name in self.mutagen:
                self.mutagen.pop(self.serialization_name)
class NumberTag(Tag.NumberTagMixin, BaseTag):
    """Numeric Vorbis tag (e.g. track number) with an associated "total" companion field."""
    def __init__(self, total_tag, *args):
        # total_tag: serialization name of the matching total field (e.g. "totaltracks")
        self._total = None
        self.serialization_total = total_tag
        super().__init__(*args)
    def extract(self):
        # get the number
        super().extract()
        if self._value:
            self._value = int(self._value)
        # get the total
        if self.serialization_total in self.mutagen:
            self._total = int(self.mutagen[self.serialization_total][0])
    @property
    def value(self):
        # returns None implicitly when unset (note: a stored 0 is also treated as unset)
        if self._value:
            return self._value
    @value.setter
    def value(self, val):
        # Accept an int, a "num/total" string (only the num part is kept),
        # or a plain numeric string; anything else raises InvalidTagValueError.
        if val is None:
            self._value = None
        elif isinstance(val, int):
            self._value = val
        elif isinstance(val, str) and self._value_match.match(val):
            # valid-looking num/total string
            self._value = int(val.split('/')[0])
        elif isinstance(val, str):
            try:
                self._value = int(val)
            except ValueError:
                raise Exceptions.InvalidTagValueError(str(val) + " is not a valid " + self.name)
        else:
            raise Exceptions.InvalidTagValueError(str(val) + " is not a valid " + self.name)
class DiscNumberTag(NumberTag):
    """Disc-number tag whose string form includes the disc total when known."""
    def __str__(self):
        # "" when unset, "num/total" when both parts are known, else just "num"
        if not self._value:
            return ""
        if self._total:
            return "{0}/{1}".format(self._value, self._total)
        return str(self._value)
class ReleaseDateTag(Tag.ReleaseDateMixin, BaseTag):
    """Release-date tag; the stored value is normalized by the mixin on load."""
    def __init__(self, *args):
        super().__init__(*args)
        self._normalize()
class Format(Tag.MetadataFormat):
    """
    A static class used to extract and save Vorbis-formated metadata tags.
    """

    # release-level serialization names
    _album = "album"
    _album_artist = "albumartist"
    _release_date = "date"
    _mbid = "mbid"
    _label = "label"
    _mbid_p = "musicbrainz_albumid"
    _country = "releasecountry"
    _release_type = "releasetype"
    _media_format = "media"

    # track-level serialization names
    _title = "title"
    _artist = "artist"
    _disc_total = "disctotal"
    _disc_total_picard = "totaldiscs"
    _disc_num = "discnumber"
    _track_total = "tracktotal"
    _track_total_picard = "totaltracks"
    _track_num = "tracknumber"
    _length = "Length"
    _acoustid = "ACOUSTID_ID"
    _track_mbid = 'MUSICBRAINZ_RELEASETRACKID'
    _recording_mbid = 'MUSICBRAINZ_TRACKID'

    ################
    # release-level tags
    ################

    @classmethod
    def album(cls, tags):
        tag = BaseTag(cls._album_name, cls._album, tags)
        return tag

    @classmethod
    def album_artist(cls, tags):
        tag = BaseTag(cls._album_artist_name, cls._album_artist, tags)
        return tag

    @classmethod
    def release_date(cls, tags):
        tag = ReleaseDateTag(cls._release_date_name, cls._release_date, tags)
        return tag

    @classmethod
    def label(cls, tags):
        tag = BaseTag(cls._label_name, cls._label, tags)
        return tag

    @classmethod
    def mbid(cls, tags):
        # prefer the Picard-style field; fall back to the legacy "mbid" field
        tag = BaseTag(cls._mbid_name, cls._mbid_p, tags)
        if tag.value is None:
            tag = BaseTag(cls._mbid_name, cls._mbid, tags)
        return tag

    @classmethod
    def country(cls, tags):
        tag = BaseTag(cls._country_name, cls._country, tags)
        return tag

    @classmethod
    def release_type(cls, tags):
        tag = BaseTag(cls._type_name, cls._release_type, tags)
        return tag

    @classmethod
    def media_format(cls, tags):
        tag = BaseTag(cls._media_format_name, cls._media_format, tags)
        return tag

    ######################
    # track-level tags
    ######################

    @classmethod
    def title(cls, tags):
        tag = BaseTag(cls._title_name, cls._title, tags)
        return tag

    @classmethod
    def artist(cls, tags):
        tag = BaseTag(cls._artist_name, cls._artist, tags)
        return tag

    @classmethod
    def disc_num(cls, tags):
        # try Picard's "totaldiscs" first, then the plain "disctotal" variant
        tag = DiscNumberTag(cls._disc_total_picard, cls._disc_num_name, cls._disc_num, tags)
        if tag.total is None:
            tag = DiscNumberTag(cls._disc_total, cls._disc_num_name, cls._disc_num, tags)
        return tag

    @classmethod
    def track_num(cls, tags):
        # try Picard's "totaltracks" first, then the plain "tracktotal" variant
        tag = NumberTag(cls._track_total_picard, cls._track_num_name, cls._track_num, tags)
        if tag.total is None:
            tag = NumberTag(cls._track_total, cls._track_num_name, cls._track_num, tags)
        return tag

    @classmethod
    def acoustid(cls, tags):
        tag = BaseTag(cls._acoustid_name, cls._acoustid, tags)
        return tag

    @classmethod
    def recording_mbid(cls, tags):
        tag = BaseTag(cls._recording_mbid_name, cls._recording_mbid, tags)
        return tag

    @classmethod
    def track_mbid(cls, tags):
        tag = BaseTag(cls._track_mbid_name, cls._track_mbid, tags)
        return tag

    #########################
    # custom tags
    #########################

    @classmethod
    def custom_tag(cls, name, tags):
        # Look the tag up under its display name; if unset, retry with
        # whitespace replaced by underscores and persist under the display name.
        tag = BaseTag(name, name, tags)
        if not tag.value:
            serialization_name = re.sub("\s", "_", name)
            under_tag = BaseTag(name, serialization_name, tags)
            tag.value = under_tag.value
            tag.save()
        return tag
| hidat/audio_pipeline | audio_pipeline/util/format/Vorbis.py | Python | mit | 6,254 | 0.003997 |
from django.http import Http404
from django.shortcuts import render
from directory.forms import SearchForm
def home(request):
    """Render the landing page with an empty directory search form."""
    search_form = SearchForm()
    return render(request, 'onigiri/index.html', {'searchform': search_form})
| cloudiirain/onigiri | onigiri/views.py | Python | gpl-3.0 | 235 | 0.008511 |
#!/usr/bin/env python
from raspledstrip.ledstrip import *
from raspledstrip.animation import *
from raspledstrip.color import Color
import requests
import json
import time
import sys
import traceback
# Things that should be configurable
ledCount = 32 * 5
api = 'http://lumiere.lighting/'
waitTime = 6
class Lumiere:
    """
    Class to handle getting light information.

    Polls the lumiere.lighting API and mirrors the latest color set onto
    an LPD8806-style LED strip.  (Python 2 code: note the print statement
    and integer division in hex_to_rgb.)
    """
    def __init__(self):
        """
        Constructor.
        """
        # module-level config: ledCount, api URL and poll interval
        self.ledCount = ledCount
        self.base_url = api
        self.currentID = None
        self.ledArray = []
        self.waitTime = waitTime
        self.led = LEDStrip(ledCount)
        self.led.all_off()
    def listen(self):
        """
        Handles the continual checking.
        """
        # poll forever; individual request failures are printed and ignored
        while True:
            try:
                self.queryLights()
                time.sleep(self.waitTime)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                print traceback.format_exc()
    def updateLights(self):
        """
        Change the lights.
        """
        self.fillArray()
        # Animate
        anim = FireFlies(self.led, self.ledArray, 1, 1, 0, self.led.lastIndex)
        for i in range(50):
            anim.step()
            self.led.update()
        # Final fill
        for li, l in enumerate(self.ledArray):
            self.led.set(li, l)
        self.led.update()
    def fillArray(self):
        """
        Fill up LED count with all the colors.
        """
        self.ledArray = []
        ledArray = []
        length = len(self.current['colors'])
        # repeat the color palette across the whole strip
        for x in range(0, self.ledCount - 1):
            ledArray.append(self.hex_to_rgb(self.current['colors'][x % length]))
        for li, l in enumerate(ledArray):
            self.ledArray.append(Color(l[0], l[1], l[2]))
    def queryLights(self):
        """
        Make request to API.
        """
        r = requests.get('%sapi/colors' % (self.base_url))
        self.current = r.json()
        # Only update if new record
        if self.currentID is None or self.currentID != self.current['_id']:
            self.currentID = self.current['_id']
            self.updateLights()
    def hex_to_rgb(self, value):
        """
        Turns hex value to RGB tuple.
        """
        value = value.lstrip('#')
        lv = len(value)
        # NOTE(review): relies on Python 2 integer division (lv/3);
        # under Python 3 this would need lv // 3
        return tuple(int(value[i:i+lv/3], 16) for i in range(0, lv, lv/3))
if __name__ == '__main__':
    # Run the polling client until interrupted.
    lumiere = Lumiere()
    lumiere.listen()
| lumiere-lighting/lumiere-node-raspberry-pi | lumiere.old.py | Python | mit | 2,222 | 0.011701 |
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import Group, User
from django.http import HttpRequest, QueryDict
from hs_core.hydroshare import resource
from hs_core import hydroshare
from hs_script_resource.models import ScriptSpecificMetadata, ScriptResource
from hs_script_resource.receivers import script_pre_create, script_metadata_pre_create_handler, script_metadata_pre_update_handler
class TestScriptResource(TransactionTestCase):
def setUp(self):
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.user = hydroshare.create_account(
'scrawley@byu.edu',
username='scrawley',
first_name='Shawn',
last_name='Crawley',
superuser=False,
groups=[self.group]
)
self.allowance = 0.00001
self.resScript = hydroshare.create_resource(
resource_type='ScriptResource',
owner=self.user,
title='Test R Script Resource',
keywords=['kw1', 'kw2']
)
def test_script_res_specific_metadata(self):
#######################
# Class: ScriptSpecificMetadata
#######################
# no ScriptSpecificMetadata obj
self.assertEqual(ScriptSpecificMetadata.objects.all().count(), 0)
# create 1 ScriptSpecificMetadata obj with required params
resource.create_metadata_element(self.resScript.short_id, 'ScriptSpecificMetadata', scriptLanguage='R',
languageVersion='3.5', scriptVersion='1.0',
scriptDependencies='None', scriptReleaseDate='2015-12-01 00:00',
scriptCodeRepository='http://www.google.com')
self.assertEqual(ScriptSpecificMetadata.objects.all().count(), 1)
# may not create additional instance of ScriptSpecificMetadata
with self.assertRaises(Exception):
resource.create_metadata_element(self.resScript.short_id, 'ScriptSpecificMetadata', scriptLanguage='R',
languageVersion='3.5', scriptVersion='1.0',
scriptDependencies='None', scriptReleaseDate='12/01/2015',
scriptCodeRepository='http://www.google.com')
self.assertEqual(ScriptSpecificMetadata.objects.all().count(), 1)
# update existing meta
resource.update_metadata_element(self.resScript.short_id, 'ScriptSpecificMetadata',
element_id=ScriptSpecificMetadata.objects.first().id,
scriptLanguage='python',
languageVersion='2.7')
self.assertEqual(ScriptSpecificMetadata.objects.first().scriptLanguage, 'python')
self.assertEqual(ScriptSpecificMetadata.objects.first().languageVersion, '2.7')
# delete ScriptSpecificMetadata obj
resource.delete_metadata_element(self.resScript.short_id, 'ScriptSpecificMetadata',
element_id=ScriptSpecificMetadata.objects.first().id)
self.assertEqual(ScriptSpecificMetadata.objects.all().count(), 0)
    def test_receivers(self):
        """Check the pre-create/pre-update signal handlers for ScriptResource.

        Each handler is expected to return a dict with an "is_valid" flag:
        True when the POSTed form data validates, False when request.POST is
        missing/None.  Also checks that script_pre_create seeds an empty
        scriptspecificmetadata element for new resources.
        """
        request = HttpRequest()
        # ScriptSpecificMetadata
        # valid POST data -> pre-create handler reports the form as valid
        request.POST = {'scriptLanguage': 'R', 'languageVersion': '3.5'}
        data = script_metadata_pre_create_handler(sender=ScriptResource,
                                                  element_name="ScriptSpecificMetadata",
                                                  request=request)
        self.assertTrue(data["is_valid"])
        # missing POST data -> handler reports the form as invalid
        request.POST = None
        data = script_metadata_pre_create_handler(sender=ScriptResource,
                                                  element_name="ScriptSpecificMetadata",
                                                  request=request)
        self.assertFalse(data["is_valid"])
        # resource-level pre-create seeds an empty scriptspecificmetadata dict
        data = script_pre_create(sender=ScriptResource,
                                 metadata=[], source_names=[],
                                 files=None)
        self.assertEqual(data[0]['scriptspecificmetadata'], {})
        # valid POST data -> pre-update handler reports the form as valid
        request.POST = {'scriptLanguage': 'R', 'languageVersion': '3.5'}
        data = script_metadata_pre_update_handler(sender=ScriptResource,
                                                  element_name="ScriptSpecificMetadata",
                                                  request=request)
        self.assertTrue(data["is_valid"])
        # missing POST data -> pre-update handler reports the form as invalid
        request.POST = None
        data = script_metadata_pre_update_handler(sender=ScriptResource,
                                                  element_name="ScriptSpecificMetadata",
                                                  request=request)
        self.assertFalse(data["is_valid"])
    def test_bulk_metadata_update(self):
        """Test the bulk update() method of the ScriptMetaData class.

        Covers creating the extended (script-specific) metadata element via
        update(), and updating core metadata (creators) and extended metadata
        in a single update() call.
        """
        # here we are testing the update() method of the ScriptMetaData class
        # check that there are no extended metadata elements at this point
        self.assertEqual(self.resScript.metadata.program, None)
        # create program metadata
        self.resScript.metadata.update([{'scriptspecificmetadata': {'scriptLanguage': 'R',
                                                                    'languageVersion': '3.5',
                                                                    'scriptVersion': '1.0',
                                                                    'scriptDependencies': 'None',
                                                                    'scriptReleaseDate':
                                                                        '2015-12-01 00:00',
                                                                    'scriptCodeRepository':
                                                                        'http://www.google.com'}}],
                                       self.user)
        # check that the extended metadata element now exists
        self.assertNotEqual(self.resScript.metadata.program, None)
        # test that we can also update core metadata using update()
        # there should be a creator element
        self.assertEqual(self.resScript.metadata.creators.count(), 1)
        self.resScript.metadata.update([{'creator': {'name': 'Second Creator'}},
                                        {'creator': {'name': 'Third Creator'}},
                                        {'scriptspecificmetadata': {'scriptVersion': '1.5'}}],
                                       self.user)
        # there should be 2 creators at this point (the previously existing
        # creator gets deleted as part of the update() call)
        self.assertEqual(self.resScript.metadata.creators.count(), 2)
        # the extended metadata element should still exist after the update
        self.assertNotEqual(self.resScript.metadata.program, None)
| hydroshare/hydroshare | hs_script_resource/tests/test_script_resource.py | Python | bsd-3-clause | 7,034 | 0.004265 |
"""
--- Day 25: The Halting Problem ---
Following the twisty passageways deeper and deeper into the CPU, you finally reach the core of the computer. Here, in the expansive central chamber, you find a grand apparatus that fills the entire room, suspended nanometers above your head.
You had always imagined CPUs to be noisy, chaotic places, bustling with activity. Instead, the room is quiet, motionless, and dark.
Suddenly, you and the CPU's garbage collector startle each other. "It's not often we get many visitors here!", he says. You inquire about the stopped machinery.
"It stopped milliseconds ago; not sure why. I'm a garbage collector, not a doctor." You ask what the machine is for.
"Programs these days, don't know their origins. That's the Turing machine! It's what makes the whole computer work." You try to explain that Turing machines are merely models of computation, but he cuts you off. "No, see, that's just what they want you to think. Ultimately, inside every CPU, there's a Turing machine driving the whole thing! Too bad this one's broken. We're doomed!"
You ask how you can help. "Well, unfortunately, the only way to get the computer running again would be to create a whole new Turing machine from scratch, but there's no way you can-" He notices the look on your face, gives you a curious glance, shrugs, and goes back to sweeping the floor.
You find the Turing machine blueprints (your puzzle input) on a tablet in a nearby pile of debris. Looking back up at the broken Turing machine above, you can start to identify its parts:
A tape which contains 0 repeated infinitely to the left and right.
A cursor, which can move left or right along the tape and read or write values at its current position.
A set of states, each containing rules about what to do based on the current value under the cursor.
Each slot on the tape has two possible values: 0 (the starting value for all slots) and 1. Based on whether the cursor is pointing at a 0 or a 1, the current state says what value to write at the current position of the cursor, whether to move the cursor left or right one slot, and which state to use next.
For example, suppose you found the following blueprint:
Begin in state A.
Perform a diagnostic checksum after 6 steps.
In state A:
If the current value is 0:
- Write the value 1.
- Move one slot to the right.
- Continue with state B.
If the current value is 1:
- Write the value 0.
- Move one slot to the left.
- Continue with state B.
In state B:
If the current value is 0:
- Write the value 1.
- Move one slot to the left.
- Continue with state A.
If the current value is 1:
- Write the value 1.
- Move one slot to the right.
- Continue with state A.
Running it until the number of steps required to take the listed diagnostic checksum would result in the following tape configurations (with the cursor marked in square brackets):
... 0 0 0 [0] 0 0 ... (before any steps; about to run state A)
... 0 0 0 1 [0] 0 ... (after 1 step; about to run state B)
... 0 0 0 [1] 1 0 ... (after 2 steps; about to run state A)
... 0 0 [0] 0 1 0 ... (after 3 steps; about to run state B)
... 0 [0] 1 0 1 0 ... (after 4 steps; about to run state A)
... 0 1 [1] 0 1 0 ... (after 5 steps; about to run state B)
... 0 1 1 [0] 1 0 ... (after 6 steps; about to run state A)
The CPU can confirm that the Turing machine is working by taking a diagnostic checksum after a specific number of steps (given in the blueprint). Once the specified number of steps have been executed, the Turing machine should pause; once it does, count the number of times 1 appears on the tape. In the above example, the diagnostic checksum is 3.
Recreate the Turing machine and save the computer! What is the diagnostic checksum it produces once it's working again?
--- Part Two ---
The Turing machine, and soon the entire computer, springs back to life. A console glows dimly nearby, awaiting your command.
> reboot printer
Error: That command requires priority 50. You currently have priority 0.
You must deposit 50 stars to increase your priority to the required level.
The console flickers for a moment, and then prints another message:
Star accepted.
You must deposit 49 stars to increase your priority to the required level.
The garbage collector winks at you, then continues sweeping.
You deposit all fifty stars and reboot the printer. Suddenly, everything seems a lot less pixelated than before.
"--raise your priority level enough to send the reboot command and... hey look, it's printing! I'll bring it to Santa. Thanks!" She runs off.
Congratulations! You've finished every puzzle in Advent of Code 2017! I hope you had as much fun solving them as I had making them for you. I'd love to hear about your adventure; you can get in touch with me via contact info on my website or through Twitter.
If you'd like to see more things like this in the future, please consider supporting Advent of Code and sharing it with others.
To hear about future projects, you can follow me on Twitter.
I've highlighted the easter eggs in each puzzle, just in case you missed any. Hover your mouse over them, and the easter egg will appear.
"""
class TuringMachine:
    """Base Turing machine over an infinite tape of 0s.

    Tracks the current state name, the cursor position, and the set of tape
    positions that currently hold a 1 (every other position holds 0).
    Subclasses implement move() and drive transitions through _move().
    """

    def __init__(self, state):
        """Start in *state* with the cursor at position 0 on an all-zero tape."""
        self.state = state
        self.pos = 0
        # Positions holding a 1.  A set gives O(1) membership tests and
        # removals; the original list made every `pos in self.ones` check
        # O(n), which dominates runtime over millions of steps.  Callers
        # only rely on `in`, `len()` and iteration, all of which a set
        # supports identically.
        self.ones = set()

    def _move(self, move, next_state=None, op=None):
        """Apply one transition.

        move: cursor delta (+1 right, -1 left).
        next_state: state to switch to, or None to stay in the current state.
        op: value to write at the current position -- 1 writes a 1, 0 writes
            a 0 (the position must currently hold a 1), None leaves it as-is.
        """
        if next_state is not None:
            self.state = next_state
        if op == 1:
            self.ones.add(self.pos)
        elif op == 0:
            self.ones.remove(self.pos)
        self.pos += move
class TestTuringMachine(TuringMachine):
    """Two-state example machine from the puzzle description (states A/B)."""

    # (state, current value) -> (cursor delta, next state, value to write)
    # A write of None leaves the slot unchanged.
    _RULES = {
        ('A', 0): (1, 'B', 1),
        ('A', 1): (-1, 'B', 0),
        ('B', 0): (-1, 'A', 1),
        ('B', 1): (1, 'A', None),
    }

    def move(self):
        """Execute one transition of the example blueprint."""
        current = 1 if self.pos in self.ones else 0
        # Mirror the original branching: anything that is not state 'A'
        # is treated with state 'B' rules.
        state_key = 'A' if self.state == 'A' else 'B'
        delta, next_state, write = self._RULES[(state_key, current)]
        self._move(delta, next_state, write)
def test1():
    """Run the example machine for 6 steps; the diagnostic checksum must be 3."""
    example = TestTuringMachine('A')
    for _ in range(6):
        example.move()
    assert len(example.ones) == 3
class Part1TuringMachine(TuringMachine):
    """Machine built from the puzzle-input blueprint (six states A-F)."""

    # (state, current value) -> (cursor delta, next state, value to write)
    # A write of None leaves the slot unchanged.
    _RULES = {
        ('A', 0): (1, 'B', 1),
        ('A', 1): (-1, 'C', 0),
        ('B', 0): (-1, 'A', 1),
        ('B', 1): (1, 'D', None),
        ('C', 0): (-1, 'B', None),
        ('C', 1): (-1, 'E', 0),
        ('D', 0): (1, 'A', 1),
        ('D', 1): (1, 'B', 0),
        ('E', 0): (-1, 'F', 1),
        ('E', 1): (-1, 'C', None),
        ('F', 0): (1, 'D', 1),
        ('F', 1): (1, 'A', None),
    }

    def move(self):
        """Execute one transition of the puzzle-input blueprint."""
        current = 1 if self.pos in self.ones else 0
        rule = self._RULES.get((self.state, current))
        if rule is None:
            # Unknown state: the original if/elif chain also did nothing.
            return
        delta, next_state, write = rule
        self._move(delta, next_state, write)
def part1():
    """Run the puzzle machine for 12,667,664 steps and print the checksum."""
    steps = 12667664
    machine = Part1TuringMachine('A')
    for _ in range(steps):
        machine.move()
    print(len(machine.ones))
# Entry point: solve part 1; uncomment test1() to re-check the example blueprint.
if __name__ == '__main__':
    # test1()
    part1()
| bbglab/adventofcode | 2017/iker/day25.py | Python | mit | 7,429 | 0.002692 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.