Dataset column summary:

  column      type            range
  repo_name   stringlengths   6 - 67
  path        stringlengths   5 - 185
  copies      stringlengths   1 - 3
  size        stringlengths   4 - 6
  content     stringlengths   1.02k - 962k
  license     stringclasses   15 values
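A minimal sketch of how rows with this schema could be loaded and filtered, assuming they are exported as a JSON-lines file; the filename below is a placeholder, not part of the dataset:

import pandas as pd

# Placeholder filename; the actual dataset file is not named above.
df = pd.read_json("code_samples.jsonl", lines=True)

# Each row carries: repo_name, path, copies, size, content, license.
permissive = df[df["license"].isin(["mit", "apache-2.0"])]
print(permissive[["repo_name", "path", "size", "license"]].head())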
repo_name: jesselegg/panda_client_python
path: panda/__init__.py
copies: 1
size: 2098
content:

from .request import PandaRequest
from .models import Video, Cloud, Encoding, Profile, Notifications, PandaDict
from .models import GroupRetriever, SingleRetriever
from .models import PandaError
from .upload_session import UploadSession

import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())


class Panda(object):
    def __init__(self, access_key, secret_key, cloud_id=None,
                 api_host='api.pandastream.com', api_port=443):
        self.cloud_id = cloud_id
        self.access_key = access_key
        self.secret_key = secret_key
        self.api_host = api_host
        self.api_port = api_port
        self.api_version = 2
        self.videos = GroupRetriever(self, Video)
        self.clouds = GroupRetriever(self, Cloud)
        self.encodings = GroupRetriever(self, Encoding)
        self.profiles = GroupRetriever(self, Profile)
        self.notifications = SingleRetriever(self, Notifications)

    def credentials(self):
        cred = ['cloud_id', 'access_key', 'secret_key',
                'api_host', 'api_port', 'api_version']
        return {key: self.__dict__[key] for key in cred}

    def get(self, request_path, params={}):
        return PandaRequest('GET', request_path, self.credentials(), params).send()

    def post(self, request_path, params={}):
        return PandaRequest('POST', request_path, self.credentials(), params).send()

    def put(self, request_path, params={}):
        return PandaRequest('PUT', request_path, self.credentials(), params).send()

    def delete(self, request_path, params={}):
        return PandaRequest('DELETE', request_path, self.credentials(), params).send()

    def signed_params(self, verb, path, timestamp=None, params={}):
        return PandaRequest(verb, path, self.credentials(), params,
                            timestamp).signed_params()

    def upload_session(self, path, **kwargs):
        return UploadSession(self, path, **kwargs)

    def cloud_details(self):
        return SingleRetriever(self, PandaDict,
                               "/clouds/{0}".format(self.cloud_id)).get()
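A minimal usage sketch for the client defined above; the credential strings and the '/videos.json' path are placeholders rather than values taken from the source:

from panda import Panda

# Placeholder credentials; real values come from the Panda/Telestream account.
client = Panda(access_key='ACCESS_KEY',
               secret_key='SECRET_KEY',
               cloud_id='CLOUD_ID')

# Each HTTP helper wraps a signed PandaRequest built from client.credentials().
videos = client.get('/videos.json')            # assumed API path, for illustration only
cloud = client.cloud_details()                 # GET /clouds/<cloud_id>
signed = client.signed_params('POST', '/videos.json')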
license: mit
repo_name: alphaBenj/zipline
path: zipline/finance/slippage.py
copies: 5
size: 16230
content:

#
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division

from abc import abstractmethod
import math

import numpy as np
from pandas import isnull
from six import with_metaclass
from toolz import merge

from zipline.assets import Equity, Future
from zipline.errors import HistoryWindowStartsBeforeData
from zipline.finance.constants import ROOT_SYMBOL_TO_ETA
from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta
from zipline.finance.transaction import create_transaction
from zipline.utils.cache import ExpiringCache
from zipline.utils.dummy import DummyMapping

SELL = 1 << 0
BUY = 1 << 1
STOP = 1 << 2
LIMIT = 1 << 3

SQRT_252 = math.sqrt(252)

DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT = 0.025
DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT = 0.05


class LiquidityExceeded(Exception):
    pass


def fill_price_worse_than_limit_price(fill_price, order):
    """
    Checks whether the fill price is worse than the order's limit price.

    Parameters
    ----------
    fill_price: float
        The price to check.

    order: zipline.finance.order.Order
        The order whose limit price to check.

    Returns
    -------
    bool: Whether the fill price is above the limit price (for a buy) or
    below the limit price (for a sell).
    """
    if order.limit:
        # this is tricky! if an order with a limit price has reached
        # the limit price, we will try to fill the order. do not fill
        # these shares if the impacted price is worse than the limit
        # price. return early to avoid creating the transaction.

        # buy order is worse if the impacted price is greater than
        # the limit price. sell order is worse if the impacted price
        # is less than the limit price
        if (order.direction > 0 and fill_price > order.limit) or \
                (order.direction < 0 and fill_price < order.limit):
            return True

    return False


class SlippageModel(with_metaclass(FinancialModelMeta)):
    """Abstract interface for defining a slippage model.
    """
    # Asset types that are compatible with the given model.
    allowed_asset_types = (Equity, Future)

    def __init__(self):
        self._volume_for_bar = 0

    @property
    def volume_for_bar(self):
        return self._volume_for_bar

    @abstractmethod
    def process_order(self, data, order):
        """Process how orders get filled.

        Parameters
        ----------
        data : BarData
            The data for the given bar.
        order : Order
            The order to simulate.

        Returns
        -------
        execution_price : float
            The price to execute the trade at.
        execution_volume : int
            The number of shares that could be filled. This may not be all
            the shares ordered in which case the order will be filled over
            multiple bars.
        """
        pass

    def simulate(self, data, asset, orders_for_asset):
        self._volume_for_bar = 0
        volume = data.current(asset, "volume")

        if volume == 0:
            return

        # can use the close price, since we verified there's volume in this
        # bar.
        price = data.current(asset, "close")

        # BEGIN
        #
        # Remove this block after fixing data to ensure volume always has
        # corresponding price.
        if isnull(price):
            return
        # END

        dt = data.current_dt

        for order in orders_for_asset:
            if order.open_amount == 0:
                continue

            order.check_triggers(price, dt)
            if not order.triggered:
                continue

            txn = None

            try:
                execution_price, execution_volume = \
                    self.process_order(data, order)

                if execution_price is not None:
                    txn = create_transaction(
                        order,
                        data.current_dt,
                        execution_price,
                        execution_volume
                    )

            except LiquidityExceeded:
                break

            if txn:
                self._volume_for_bar += abs(txn.amount)
                yield order, txn

    def asdict(self):
        return self.__dict__


class EquitySlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)):
    """
    Base class for slippage models which only support equities.
    """
    allowed_asset_types = (Equity,)


class FutureSlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)):
    """
    Base class for slippage models which only support futures.
    """
    allowed_asset_types = (Future,)


class VolumeShareSlippage(SlippageModel):
    """
    Model slippage as a function of the volume of contracts traded.
    """

    def __init__(self, volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT,
                 price_impact=0.1):

        super(VolumeShareSlippage, self).__init__()

        self.volume_limit = volume_limit
        self.price_impact = price_impact

    def __repr__(self):
        return """
{class_name}(
    volume_limit={volume_limit},
    price_impact={price_impact})
""".strip().format(class_name=self.__class__.__name__,
                   volume_limit=self.volume_limit,
                   price_impact=self.price_impact)

    def process_order(self, data, order):
        volume = data.current(order.asset, "volume")

        max_volume = self.volume_limit * volume

        # price impact accounts for the total volume of transactions
        # created against the current minute bar
        remaining_volume = max_volume - self.volume_for_bar
        if remaining_volume < 1:
            # we can't fill any more transactions
            raise LiquidityExceeded()

        # the current order amount will be the min of the
        # volume available in the bar or the open amount.
        cur_volume = int(min(remaining_volume, abs(order.open_amount)))

        if cur_volume < 1:
            return None, None

        # tally the current amount into our total amount ordered.
        # total amount will be used to calculate price impact
        total_volume = self.volume_for_bar + cur_volume

        volume_share = min(total_volume / volume, self.volume_limit)

        price = data.current(order.asset, "close")

        # BEGIN
        #
        # Remove this block after fixing data to ensure volume always has
        # corresponding price.
        if isnull(price):
            return
        # END

        simulated_impact = volume_share ** 2 \
            * math.copysign(self.price_impact, order.direction) \
            * price
        impacted_price = price + simulated_impact

        if fill_price_worse_than_limit_price(impacted_price, order):
            return None, None

        return (
            impacted_price,
            math.copysign(cur_volume, order.direction)
        )


class FixedSlippage(SlippageModel):
    """
    Model slippage as a fixed spread.

    Parameters
    ----------
    spread : float, optional
        spread / 2 will be added to buys and subtracted from sells.
    """

    def __init__(self, spread=0.0):
        super(FixedSlippage, self).__init__()
        self.spread = spread

    def __repr__(self):
        return '{class_name}(spread={spread})'.format(
            class_name=self.__class__.__name__, spread=self.spread,
        )

    def process_order(self, data, order):
        price = data.current(order.asset, "close")

        return (
            price + (self.spread / 2.0 * order.direction),
            order.amount
        )


class MarketImpactBase(SlippageModel):
    """
    Base class for slippage models which compute a simulated price impact
    according to a history lookback.
    """

    NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 10.0 / 10000

    def __init__(self):
        super(MarketImpactBase, self).__init__()
        self._window_data_cache = ExpiringCache()

    @abstractmethod
    def get_txn_volume(self, data, order):
        """
        Return the number of shares we would like to order in this minute.

        Parameters
        ----------
        data : BarData
        order : Order

        Return
        ------
        int : the number of shares
        """
        raise NotImplementedError('get_txn_volume')

    @abstractmethod
    def get_simulated_impact(self,
                             order,
                             current_price,
                             current_volume,
                             txn_volume,
                             mean_volume,
                             volatility):
        """
        Calculate simulated price impact.

        Parameters
        ----------
        order : The order being processed.
        current_price : Current price of the asset being ordered.
        current_volume : Volume of the asset being ordered for the current
            bar.
        txn_volume : Number of shares/contracts being ordered.
        mean_volume : Trailing ADV of the asset.
        volatility : Annualized daily volatility of volume.

        Return
        ------
        int : impact on the current price.
        """
        raise NotImplementedError('get_simulated_impact')

    def process_order(self, data, order):
        if order.open_amount == 0:
            return None, None

        minute_data = data.current(order.asset, ['volume', 'high', 'low'])
        mean_volume, volatility = self._get_window_data(data, order.asset, 20)

        # Price to use is the average of the minute bar's open and close.
        price = np.mean([minute_data['high'], minute_data['low']])

        volume = minute_data['volume']
        if not volume:
            return None, None

        txn_volume = int(
            min(self.get_txn_volume(data, order), abs(order.open_amount))
        )

        # If the computed transaction volume is zero or a decimal value, 'int'
        # will round it down to zero. In that case just bail.
        if txn_volume == 0:
            return None, None

        if mean_volume == 0 or np.isnan(volatility):
            # If this is the first day the contract exists or there is no
            # volume history, default to a conservative estimate of impact.
            simulated_impact = price * self.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT
        else:
            simulated_impact = self.get_simulated_impact(
                order=order,
                current_price=price,
                current_volume=volume,
                txn_volume=txn_volume,
                mean_volume=mean_volume,
                volatility=volatility,
            )

        impacted_price = \
            price + math.copysign(simulated_impact, order.direction)

        if fill_price_worse_than_limit_price(impacted_price, order):
            return None, None

        return impacted_price, math.copysign(txn_volume, order.direction)

    def _get_window_data(self, data, asset, window_length):
        """
        Internal utility method to return the trailing mean volume over the
        past 'window_length' days, and volatility of close prices for a
        specific asset.

        Parameters
        ----------
        data : The BarData from which to fetch the daily windows.
        asset : The Asset whose data we are fetching.
        window_length : Number of days of history used to calculate the mean
            volume and close price volatility.

        Returns
        -------
        (mean volume, volatility)
        """
        try:
            values = self._window_data_cache.get(asset, data.current_session)
        except KeyError:
            try:
                # Add a day because we want 'window_length' complete days,
                # excluding the current day.
                volume_history = data.history(
                    asset, 'volume', window_length + 1, '1d',
                )
                close_history = data.history(
                    asset, 'close', window_length + 1, '1d',
                )
            except HistoryWindowStartsBeforeData:
                # If there is not enough data to do a full history call,
                # return values as if there was no data.
                return 0, np.NaN

            # Exclude the first value of the percent change array because it
            # is always just NaN.
            close_volatility = close_history[:-1].pct_change()[1:].std(
                skipna=False,
            )
            values = {
                'volume': volume_history[:-1].mean(),
                'close': close_volatility * SQRT_252,
            }
            self._window_data_cache.set(asset, values, data.current_session)

        return values['volume'], values['close']


class VolatilityVolumeShare(MarketImpactBase):
    """
    Model slippage for futures contracts according to the following formula:

        new_price = price + (price * MI / 10000),

    where 'MI' is market impact, which is defined as:

        MI = eta * sigma * sqrt(psi)

    Eta is a constant which varies by root symbol.
    Sigma is 20-day annualized volatility.
    Psi is the volume traded in the given bar divided by 20-day ADV.

    Parameters
    ----------
    volume_limit : float
        Maximum percentage (as a decimal) of a bar's total volume that can
        be traded.
    eta : float or dict
        Constant used in the market impact formula. If given a float, the
        eta for all futures contracts is the same. If given a dictionary, it
        must map root symbols to the eta for contracts of that symbol.
    """

    NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 7.5 / 10000
    allowed_asset_types = (Future,)

    def __init__(self, volume_limit, eta=ROOT_SYMBOL_TO_ETA):
        super(VolatilityVolumeShare, self).__init__()
        self.volume_limit = volume_limit

        # If 'eta' is a constant, use a dummy mapping to treat it as a
        # dictionary that always returns the same value.
        # NOTE: This dictionary does not handle unknown root symbols, so it
        # may be worth revisiting this behavior.
        if isinstance(eta, (int, float)):
            self._eta = DummyMapping(float(eta))
        else:
            # Eta is a dictionary. If the user's dictionary does not provide
            # a value for a certain contract, fall back on the pre-defined
            # eta values per root symbol.
            self._eta = merge(ROOT_SYMBOL_TO_ETA, eta)

    def __repr__(self):
        if isinstance(self._eta, DummyMapping):
            # Eta is a constant, so extract it.
            eta = self._eta['dummy key']
        else:
            eta = '<varies>'
        return '{class_name}(volume_limit={volume_limit}, eta={eta})'.format(
            class_name=self.__class__.__name__,
            volume_limit=self.volume_limit,
            eta=eta,
        )

    def get_simulated_impact(self,
                             order,
                             current_price,
                             current_volume,
                             txn_volume,
                             mean_volume,
                             volatility):
        eta = self._eta[order.asset.root_symbol]
        psi = txn_volume / mean_volume

        market_impact = eta * volatility * math.sqrt(psi)

        # We divide by 10,000 because this model computes to basis points.
        # To convert from bps to % we need to divide by 100, then again to
        # convert from % to fraction.
        return (current_price * market_impact) / 10000

    def get_txn_volume(self, data, order):
        volume = data.current(order.asset, 'volume')
        return volume * self.volume_limit
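To make the two impact formulas in this module concrete, here is a small standalone sketch that evaluates them outside zipline; every number below (price, volumes, eta, volatility, ADV) is an illustrative placeholder, not a value from the source:

import math

# VolumeShareSlippage: impact = volume_share**2 * copysign(price_impact, direction) * price
price = 100.0                  # illustrative close price
bar_volume = 200000            # illustrative bar volume
order_shares = 4000            # illustrative buy order (direction = +1)
volume_limit = 0.025           # DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT
price_impact = 0.1

volume_share = min(order_shares / bar_volume, volume_limit)      # 0.02, capped at 2.5%
impacted_price = price + volume_share ** 2 * math.copysign(price_impact, +1) * price
print(impacted_price)          # 100.004: a buy fills slightly above the close

# VolatilityVolumeShare: MI = eta * sigma * sqrt(psi); new_price = price + price * MI / 10000
eta = 0.05                     # illustrative per-root-symbol constant
sigma = 0.20                   # illustrative 20-day annualized volatility
psi = order_shares / 100000.0  # volume traded in the bar / 20-day ADV (illustrative ADV)
market_impact = eta * sigma * math.sqrt(psi)          # expressed in basis points
print(price + price * market_impact / 10000)          # ~100.00002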
license: apache-2.0
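For context, a slippage model like the ones above is normally attached to a zipline algorithm inside initialize(). A sketch based on zipline's public set_slippage API, using the equity defaults shown in the module; the exact call signature may differ between zipline versions:

from zipline.api import set_slippage
from zipline.finance import slippage

def initialize(context):
    # Cap fills at 2.5% of a bar's volume and apply the quadratic price
    # impact, matching the VolumeShareSlippage defaults in the module above.
    set_slippage(slippage.VolumeShareSlippage(volume_limit=0.025,
                                              price_impact=0.1))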
repo_name: philscher/gkc
path: doc/scripts/grid_BoundaryDomain.py
copies: 1
size: 3530
content:

from pylab import *
import random
import matplotlib.lines as lines
import matplotlib.patches as patches
#import matplotlib.text as text
import matplotlib.collections as collections
import matplotlib.units as units
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.collections import PatchCollection
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import pylab

# Add MPI communication and information exchange

fig = plt.figure()
ax = fig.add_subplot(111)

ax.set_frame_on(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
#ax.fax1 = plt.axes(frameon=False)


def plotMeasure(start_xy, eo_xy, text):
    l_y = linspace(start_xy[1], eo_xy[1]+3., 128)
    plot(ones(128)*start_xy[0], l_y, 'k--')
    plot(ones(128)*eo_xy[0], l_y, 'k--')
    l_x = linspace(start_xy[0], eo_xy[0], 128)
    plot(l_x, ones(128)*eo_xy[1], 'k-')
    pylab.text(0.5*(start_xy[0]+eo_xy[0]), eo_xy[1]+3., text, ha='center')


# Second Domain
rect_1 = patches.Rectangle((-50, 0.), width=50, height=20, alpha=0.2, axes=ax, color='b')
ax.add_patch(rect_1)
plotMeasure((-50., 20.), (0., 30.), "RxLD")

rect_1 = mpatches.Rectangle((-60., 0.), width=70, height=20, alpha=0.2, axes=ax)
rect_1.set_fc('g')
ax.add_patch(rect_1)
plotMeasure((-60., 20.), (10., 45.), "RxLB")

# First Domain
rect_1 = patches.Rectangle((-100, -20), width=50, height=20, alpha=0.2, axes=ax, color='b')
ax.add_patch(rect_1)
plotMeasure((0., -20.), (50., -35.), "RxLD")

rect_1 = mpatches.Rectangle((-110., -20), width=70, height=20, alpha=0.2, axes=ax)
rect_1.set_fc('g')
ax.add_patch(rect_1)
plotMeasure((-10., -20.), (60., -50.), "RxLB")

# Third
rect_1 = patches.Rectangle((0., -20), width=50, height=20, alpha=0.2, axes=ax, color='b')
ax.add_patch(rect_1)

rect_1 = mpatches.Rectangle((-10., -20), width=70, height=20, alpha=0.2, axes=ax)
rect_1.set_fc('g')
ax.add_patch(rect_1)

# Fourth
rect_1 = patches.Rectangle((50, 0.), width=50, height=20, alpha=0.2, axes=ax, color='b')
ax.add_patch(rect_1)

rect_1 = mpatches.Rectangle((40., 0.), width=70, height=20, alpha=0.2, axes=ax)
rect_1.set_fc('g')
ax.add_patch(rect_1)

plotMeasure((-100., 0.), (100., 65.), "RxGD")
plotMeasure((-110., 0.), (110., 85.), "RxGB")

# Manually set legend
rect_1 = patches.Rectangle((-120., -40), width=12, height=8, alpha=0.2, axes=ax, color='b')
ax.add_patch(rect_1)
pylab.text(-100, -36, "Domain", ha='left', va='center')

rect_1 = mpatches.Rectangle((-120., -54), width=12, height=8, alpha=0.2, axes=ax)
rect_1.set_fc('g')
ax.add_patch(rect_1)
pylab.text(-100, -50, "Boundary Domain", ha='left', va='center')

#rect_1 = patches.Rectangle( (-100, 20.), width=200, height=20, alpha=0.2, axes=ax, color='b')
#rect_1.set_fc('r')
#ax.add_patch(rect_1)
#rect_1 = mpatches.Rectangle( (-110., 20.), width=220, height=20, alpha=0.2, axes=ax)
#rect_1.set_fc('g')
#ax.add_patch(rect_1)

#rect_2 = mpatches.Rectangle( (10, 10), width=120, height=20, alpha=0.2, axes=ax)
#rect_2 = mpatches.Rectangle( (10, 10), width=120, height=20, alpha=0.2, axes=ax)
#rect_2 = mpatches.Rectangle( (10, 10), width=120, height=20, alpha=0.2, axes=ax)
#rect_2.set_fc('r')
#ax.add_artist(rect_2)
#rect_2 = mpatches.Circle( (0, 0), 0.5, alpha=0.2, axes=ax, color='r')
#rect_2.set_fc('r')
#ax.add_artist(rect_2)

#xlim((-150, 150))
#ylim((-100, 100))

#collection.set_array(np.array(colors))
#ax.add_collection(collection)

savefig("Grid_BoundaryDomain.png", bbox_inches='tight')
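The four domain blocks in this script repeat one pattern: a blue "Domain" rectangle paired with a wider green "Boundary Domain" rectangle that extends 10 units past each side. A compact sketch of that pattern as a helper; the function name and the halo-width parameter are mine, not from the script:

import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

def add_domain(ax, x, y, width=50, height=20, halo=10):
    # Inner computational domain (blue) and its boundary region (green),
    # mirroring the rectangle pairs drawn in the script above.
    ax.add_patch(mpatches.Rectangle((x, y), width, height, alpha=0.2, fc='b'))
    ax.add_patch(mpatches.Rectangle((x - halo, y), width + 2 * halo, height,
                                    alpha=0.2, fc='g'))

fig, ax = plt.subplots()
add_domain(ax, -50, 0.0)   # corresponds to the "Second Domain" block above
ax.set_xlim(-150, 150)
ax.set_ylim(-100, 100)
fig.savefig("Grid_BoundaryDomain_example.png", bbox_inches='tight')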
license: gpl-3.0
repo_name: abouquin/django-megaraetc
path: etc/views.py
copies: 1
size: 78417
content:

from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.core.urlresolvers import reverse

from .forms import AtmosphericConditionsForm, ObservationalSetupForm
from .forms import TargetForm, InstrumentForm
from .forms import OutputSetupForm, UploadFileForm
from .models import PhotometricFilter, SeeingTemplate
from .models import SpectralTemplate, VPHSetup
# from .models import MyModel  #, Document

from justcalc import calc
# from plot1 import plot_and_save_new, plot_and_save2_new
from plot1_bokeh import bokehplot1

import numpy
# import matplotlib
# matplotlib.use('Agg')
#
# import mpld3
# from mpld3 import plugins
# /home/pica/Documents/virt_django/django_megara

import time
from time import strftime
import os

from django.conf import settings  # or from my_project import settings


def compute5(request):
    print request.POST['stype']
    print '### LOG: Entered compute5'

    if request.POST['stype'] == 'P':
        isize_val = 'A'
        size_val = 1.0
        radius_val = 1.0
    else:
        if request.POST['isize'] == 'A':
            isize_val = request.POST['isize']
            size_val = request.POST['size']
            radius_val = 1.0
        else:
            isize_val = request.POST['isize']
            radius_val = float(request.POST['radius'])
            size_val = numpy.pi * (radius_val ** 2)

    mag_val = float(request.POST['contmagval']) if request.POST['contmagflux'] == 'M' else 20.0
    fc_val = 1e-16 if request.POST['contmagflux'] == 'M' else float(request.POST['contfluxval'])

    fluxt_val = request.POST['iflux']
    if fluxt_val == "C":
        resolvedline_val = "N"
        fline_val = 1e-13
        wline_val = 6562.8
        fwhmline_val = 6
        nfwhmline_val = 1.0
        cnfwhmline_val = 1.0
    else:
        fline_val = float(request.POST['lineflux'])
        wline_val = float(request.POST['linewave'])
        nfwhmline_val = float(request.POST['lineap'])
        cnfwhmline_val = float(request.POST['contap'])

    if fluxt_val == "L" and request.POST['rline'] == "N":
        resolvedline_val = "N"
        fwhmline_val = 6
    elif fluxt_val == "L" and request.POST['rline'] == "Y":
        resolvedline_val = "Y"
        fwhmline_val = float(request.POST['linefwhm'])

    querybandc = PhotometricFilter.objects.filter(
        pk=request.POST['pfilter']).values()  # get row with the values at primary key
    bandc_val = querybandc[0]['name']
    # entry_filter_cwl = querybandc[0]['cwl']
    # entry_filter_width = querybandc[0]['path']

    queryvph = VPHSetup.objects.filter(pk=request.POST['vph']).values()
    vph_val = queryvph[0]['name']

    if vph_val == '-empty-':  # Deals with -empty- VPH Setup
        # vph_val = 'MR-UB'
        outtext = "WARNING: VPH Setup is -empty-! Choose a VPH."
        texti = " "
        textoc = " "
        textol = " "
    else:
        entry_vph_fwhm = queryvph[0]['fwhm']
        entry_vph_disp = queryvph[0]['dispersion']
        entry_vph_deltab = queryvph[0]['deltab']
        entry_vph_lambdac = queryvph[0]['lambdac']
        entry_vph_relatedband = queryvph[0]['relatedband']
        entry_vph_lambdab = queryvph[0]['lambda_b']
        entry_vph_lambdae = queryvph[0]['lambda_e']
        entry_vph_specconf = queryvph[0]['specconf']
        # vphfeatures = [entry_vph_fwhm, entry_vph_disp, entry_vph_deltab, entry_vph_lambdac,
        #                entry_vph_relatedband, entry_vph_lambdab, entry_vph_lambdae, entry_vph_specconf]
        # filtercar2 = ["0", "0", entry_vph_lambdab, entry_vph_lambdae]

    spec = request.POST['spectype']
    if fluxt_val == "L":
        if spec == '7':     # NGC1068
            spec = '47'     # NGC1068 (smooth)
        elif spec == '20':  # Orion
            spec = '48'     # Orion (smooth)
        elif spec == '21':  # PN
            spec = '49'     # PN (smooth)
        elif spec == '6':   # Sc
            spec = '50'     # Sc (smooth)
        elif spec == '9':   # Starburst1
            spec = '51'     # Starburst1 (smooth)
        elif spec == '10':  # Starburst2
            spec = '52'     # Starburst2 (smooth)
        elif spec == '11':  # Starburst3
            spec = '53'     # Starburst3 (smooth)
        elif spec == '12':  # Starburst4
            spec = '54'     # Starburst4 (smooth)
        elif spec == '13':  # Starburst5
            spec = '55'     # Starburst5 (smooth)
        elif spec == '14':  # Starburst6
            spec = '56'     # Starburst6 (smooth)
        elif spec == '19':  # Sy2
            spec = '57'     # Sy2 (smooth)

    queryspec = SpectralTemplate.objects.filter(pk=spec).values()
    entry_spec_name = queryspec[0]['name']
    spect_val = entry_spec_name

    skycond_val = request.POST['skycond']
    moon_val = request.POST['moonph']
    airmass_val = float(request.POST['airmass'])

    queryseeing = SeeingTemplate.objects.filter(pk=request.POST['seeing']).values()
    seeing_val = queryseeing[0]['name']

    numframes_val = float(request.POST['numframes'])

    # Cope for cmode variations
    cmode_val = request.POST['cmode']
    if cmode_val == 'T':
        exptimepframe_val = float(request.POST['exptimepframe'])
        snr_val = 10
    elif cmode_val == 'S':
        exptimepframe_val = 3600
        snr_val = float(request.POST['exptimepframe'])

    nsbundles_val = int(request.POST['nsbundles'])
    plotflag_val = request.POST['plotflag']

    if request.POST['stype'] == 'P' and request.POST['batchyesno'] == 'batchyes':
        thebatchdata = request.POST['comment']
        batchyesno_val = request.POST['batchyesno']
        print batchyesno_val
    else:
        batchyesno_val = 'batchno'
        print batchyesno_val
        thebatchdata = "empty,empty,empty,empty"

    outputofcalc = calc(request.POST['stype'], request.POST['contmagflux'],
                        mag_val, fc_val, isize_val, size_val, radius_val,
                        fluxt_val, fline_val, wline_val, nfwhmline_val,
                        cnfwhmline_val, fwhmline_val, resolvedline_val,
                        spect_val, bandc_val, request.POST['om_val'], vph_val,
                        skycond_val, moon_val, airmass_val, seeing_val,
                        cmode_val, snr_val, numframes_val, exptimepframe_val,
                        nsbundles_val, plotflag_val, batchyesno_val,
                        thebatchdata)

    # cleanstring = string1.replace("\'", '\n')
    # cleanstring = cleanstring.replace(",", ' ')
    # cleanstring = cleanstring.replace("u\n", '\n')
    # cleanstring = cleanstring[1:].replace("(", '\n')
    # cleanstring = cleanstring[:-1].replace("(", '\n')
    # cleanstring = [outtextstring, inputstring, coutputstring, loutputstring]

    print 'LOG: About to leave compute5 and return outputofcalc'
    # print outputofcalc
    return outputofcalc


##############################################################################
##############################################################################
def basic(request):
    return render(request, 'etc/index.html')


def get_info(request):
    # if this is a POST request we need to process the form data
    if request.method == 'GET':
        # create a form instance and populate it with data from the request:
        form1 = TargetForm(request.GET)
        formU = UploadFileForm(request.GET)
        form2 = InstrumentForm(request.GET)
        form3 = AtmosphericConditionsForm(request.GET)
        form4 = ObservationalSetupForm(request.GET)
        form5 = OutputSetupForm(request.GET)
    # if a GET (or any other method) we'll create a blank form
    else:
        form1 = TargetForm()
        formU = UploadFileForm()
        form2 = InstrumentForm()
        form3 = AtmosphericConditionsForm()
        form4 = ObservationalSetupForm()
        form5 = OutputSetupForm()

    return render(request, 'etc/webmegaraetc-1.0.2.html', {
        'form1': form1,
        'formU': formU,
        'form2': form2,
        'form3': form3,
        'form4': form4,
        'form5': form5,
    })


# LOADS THIS AFTER PUSHING "START" in index.html
def etc_form(request):
    form1 = TargetForm()
    formU = UploadFileForm()
    form2 = InstrumentForm()
    form3 = AtmosphericConditionsForm()
    form4 = ObservationalSetupForm()
    form5 = OutputSetupForm()
    total_formu = {'form1': form1,
                   'formU': formU,
                   'form2': form2,
                   'form3': form3,
                   'form4': form4,
                   'form5': form5,
                   }
    return render(request, 'etc/webmegaraetc-1.0.2.html', total_formu)


########################
# ON HOLD UNTIL I FIND A GOOD WAY TO UPLOAD FILES
# MIGRATING MODELS DIDN'T WORK EITHER.
#
########################
def uploadView(request):
    # phase = get_object_or_404(Phase, pk=int(phase_id))
    if request.method == 'POST':
        # form = UploadFileForm(request.POST, request.FILES)
        form = UploadFileForm(request.POST, request.FILES)
        # print dir(request)
        formg = TargetForm(request.POST)
        print form
        print formg['pfilter']
        print settings.MEDIA_ROOT
        print ""
        print "REQUEST.POST=", request.POST
        # print request.META
        # print request.encoding
        print (form.errors)
        print ''
        print 'OK UP TO HERE'
        print request.FILES
        print 'REQUEST FILES=', request.FILES['myfile']
        print ''
        if form.is_valid():
            print 'VALID'
            # newdoc = MyModel(filename = request.FILES['myfile'])
            # newdoc.save()
            # doc_to_save = request.FILES['fileupload']
            # filename = doc_to_save._get_name()
            # fd = open('uploads/'+str(filename),'wb')
            # for chunk in doc_to_save.chunks():
            #     fd.write(chunk)
            # fd.close()
            # return HttpResponseRedirect(reverse('uploadView'))
            return HttpResponseRedirect('/success/url/')
        else:
            print 'WARNING: INVALID FORM!'
            print (form.errors)
    form = UploadFileForm()
    # documents = Document.objects.filter(phase=phase_id)
    return render(request, 'etc/upload.html', {'form': form})


##############################################
##############################################
### PERFORM CALCULATIONS.
### THIS RETURNS THE OUTPUT IN JSON FORMAT AND IS USED IN THE JS FILE ### FINAL STRING CLEANSING/FILTERING HERE ### # def etc_do(request): if request.method == 'GET': # import matplotlib.pyplot as plt # import mpld3 # from mpld3 import plugins, utils # # plt.plot([3,1,4,1,5], 'ks-', mec='w', mew=5, ms=20) # mpld3.save_html() return HttpResponse("<html><body>The GET method works!</body></html>") elif request.method == 'POST': print '### LOG: ETC_DO' start_time = time.time() start_time_string = time.strftime("%H:%M:%S") print 'Start time: ', str(start_time) print 'Date time: ', start_time_string ############################################## ############################################## ### ATTEMPT TO PRINT UPLOADED FILENAME (TEST) # up = uploadView(request) print request.body ############################################## ############################################## ### LAUNCH COMPUTATION if request.POST['stype'] == 'P' and request.POST['batchyesno']=='batchyes': print request.POST['batchyesno'] print request.POST['comment'] outputofcalc = compute5(request) print '### LOG: ETC_DO: OUTPUTOFCALC SUCCESSFULLY COMPUTED' ############################################## ############################################## tocheck = str(outputofcalc['outtext']) if not tocheck: outtextstring = "" # No warning. else: outtextstring = tocheck ############################################## ### OUTPUT TEXT ############################################## inputstring = '<br /><p>' + str(outputofcalc['texti']) + '</p>' coutputstring = '<br /><p>' + str(outputofcalc['textoc']) + '</p>' loutputstring = '<br /><p>' + str(outputofcalc['textol']) + '</p>' textcalcstring = '<br /><table border=1 id="mathtextid"><tr><td>' + str( outputofcalc['textcalc']) + '</td></tr></table>' # Suck out relevant data from database vph = request.POST['vph'] queryvph = VPHSetup.objects.filter(pk=vph).values() vph_val = queryvph[0]['name'] vph_minval = queryvph[0]['lambda_b'] vph_maxval = queryvph[0]['lambda_e'] spec = request.POST['spectype'] queryspec = SpectralTemplate.objects.filter(pk=spec).values() entry_spec_name = queryspec[0]['name'] ################################################################ ########################## GRAPHICS ############################ ################################################################ plotflag_val = request.POST['plotflag'] if plotflag_val == 'yes': # First, prepare variables to be plotted # Check if computation outputs are conform if not tocheck: x = outputofcalc['lamb'] y = outputofcalc['fc'] #+ outputofcalc['fs'] ysky = outputofcalc['fs'] label1 = entry_spec_name label2 = outputofcalc['mag_val'] label3 = outputofcalc['bandc_val'] lc = outputofcalc['lambdaeff'] sourcet = outputofcalc['sourcet_val'] fluxt = outputofcalc['fluxt_val'] x2 = x x2b = x2 x2c = x2 x2d = x2 if outputofcalc['fluxt_val'] == 'L': y2 = outputofcalc['pframesn_pervoxel_fiber'] y2b = outputofcalc['allframesn_pervoxel_fiber'] y2c = outputofcalc['pframesn_pervoxel_all'] y2d = outputofcalc['allframesn_pervoxel_all'] else: y2 = outputofcalc['pframesn_pervoxel_c'] y2b = outputofcalc['allframesn_pervoxel_c'] y2c = outputofcalc['pframesn_pervoxel_cr1'] y2d = outputofcalc['allframesn_pervoxel_cr1'] label2a = entry_spec_name label2b = vph_val label2c = outputofcalc['bandc_val'] else: x = numpy.arange(1, 1001, 1) y = numpy.arange(1, 1001, 1) ysky = numpy.arange(1, 1001, 1) x2 = numpy.arange(1, 1001, 1) y2 = numpy.arange(1, 1001, 1) x2b = numpy.arange(1, 1001, 1) y2b = numpy.arange(1, 1001, 1) x2c = numpy.arange(1, 1001, 1) y2c = 
numpy.arange(1, 1001, 1) x2d = numpy.arange(1, 1001, 1) y2d = numpy.arange(1, 1001, 1) label1 = "none" label2 = 20.0 # Float label3 = "none" lc = 0 sourcet = "none" fluxt = "none" label2a = "none" label2b = "none" label2c = "none" # More things to check if not tocheck: x3 = outputofcalc['wline_val'] y3 = outputofcalc['fwhmline_val'] z3 = outputofcalc['fline_val'] else: x3 = 0 y3 = 0 z3 = 0 # LEGACY CODE (MPLD3) # figura = plot_and_save_new('', x, y, x3, y3, z3, # vph_minval, vph_maxval, # label1, label2, label3) # html = mpld3.fig_to_html(figura) # html += mpld3.fig_to_html(figura2) # html = html.replace("None", "") # No se xq introduce string None thescript, thediv = bokehplot1(sourcet, x, y, ysky, x3, y3, z3, vph_minval, vph_maxval, label1, label2, label3, x2, y2, x2b, y2b, x2c, y2c, x2d, y2d, label2a, label2b, label2c, fluxt, lc) else: thescript = "" thediv = "" html = "" ################################################################ ################################################################ ################################################################ # Check variables and format them to strings if not tocheck: om_val_string = str(outputofcalc['om_val']) bandc_val_string = str(outputofcalc['bandc_val']) sourcet_val_string = str(outputofcalc['sourcet_val']) if sourcet_val_string == 'E': sourcet_val_string = 'Extended' else: sourcet_val_string = 'Point' mag_val_string = str('%.2f' % outputofcalc['mag_val']) netflux_string = '%.3e' % outputofcalc['netflux'] size_val_string = str('%.2f' % outputofcalc['size_val']) radius_val_string = str('%.2f' % outputofcalc['radius_val']) seeingx_string = str(outputofcalc['seeingx']) fluxt_val_string = outputofcalc['fluxt_val'] if fluxt_val_string == "L": fluxt_name_string = 'Line+Continuum' else: fluxt_name_string = 'Continuum' wline_val_string = str(outputofcalc['wline_val']) fline_val_string = str(outputofcalc['fline_val']) fwhmline_val_string = str(outputofcalc['fwhmline_val']) vph_val_string = str(outputofcalc['vph_val']) skycond_val_string = str(outputofcalc['skycond_val']) moon_val_string = str(outputofcalc['moon_val']) airmass_val_string = str(outputofcalc['airmass_val']) seeing_zenith_string = str(outputofcalc['seeing_zenith']) fcont_string = '%.3e' % outputofcalc['fcont'] contatlambdacvph_string = '%.3e' % outputofcalc['contatlambdacvph'] fsky_string = '%.3e' % outputofcalc['fsky'] numframe_val = outputofcalc['numframe_val'] numframe_val_string = "{0:.0f}".format(float(numframe_val)) exptimepframe_val_string = str(outputofcalc['exptimepframe_val']) exptime_val_string = str(outputofcalc['exptime_val']) npdark_val_string = str(outputofcalc['npdark_val']) nsbundles_val_string = str(outputofcalc['nsbundles_val']) nfwhmline_val_string = str(outputofcalc['nfwhmline_val']) cnfwhmline_val_string = str(outputofcalc['cnfwhmline_val']) resolvedline_val_string = str(outputofcalc['resolvedline_val']) bandsky_string = str(outputofcalc['bandsky']) sourcespectrum_string = str(outputofcalc['sourcespectrum']) lamb_string = str(outputofcalc['lamb']) spect_val_string = str(outputofcalc['spect_val']) fc_string = str(outputofcalc['fc']) # pframesn_psp_asp_string = str(outputofcalc['pframesn_psp_asp']) # allframesn_psp_asp_string = str(outputofcalc['pframesn_psp_asp']) # pframesn_psp_asp_all_string = str(outputofcalc['pframesn_psp_asp_all']) # allframesn_psp_asp_all_string = str(outputofcalc['allframesn_psp_asp_all']) nfibres_string = str(outputofcalc['nfibres']) nfib_string = str(outputofcalc['nfib']) nfib1def_string = str(outputofcalc['nfib1def']) 
sncont_p2sp_all_string = "{0:.2f}".format( float(outputofcalc['sncont_p2sp_all']) / 2) tsncont_p2sp_all_val = float(outputofcalc['tsncont_p2sp_all'] / 2) tsncont_p2sp_all_string = "{0:.2f}".format( float(outputofcalc['tsncont_p2sp_all']) / 2) sncont_pspfwhm_all_string = "{0:.2f}".format( float(outputofcalc['sncont_p2sp_all'])) tsncont_pspfwhm_all_string = "{0:.2f}".format( float(outputofcalc['tsncont_p2sp_all'])) sncont_1aa_all_string = "{0:.2f}".format( float(outputofcalc['sncont_1aa_all'])) tsncont_1aa_all_string = "{0:.2f}".format( float(outputofcalc['tsncont_1aa_all'])) sncont_band_all_string = "{0:.2f}".format( float(outputofcalc['sncont_band_all'])) tsncont_band_all_string = "{0:.2f}".format( float(outputofcalc['tsncont_band_all'])) sncont_p2sp_fibre_string = "{0:.2f}".format( float(outputofcalc['sncont_p2sp_fibre'])) tsncont_p2sp_fibre_string = "{0:.2f}".format( float(outputofcalc['tsncont_p2sp_fibre'])) sncont_1aa_fibre_string = "{0:.2f}".format( float(outputofcalc['sncont_1aa_fibre'])) tsncont_1aa_fibre_string = "{0:.2f}".format( float(outputofcalc['tsncont_1aa_fibre'])) sncont_band_fibre_string = str(outputofcalc['sncont_band_fibre']) tsncont_band_fibre_string = str(outputofcalc['tsncont_band_fibre']) sncont_p2sp_seeing_string = "{0:.2f}".format( float(outputofcalc['sncont_p2sp_seeing'])) tsncont_p2sp_seeing_string = "{0:.2f}".format( float(outputofcalc['tsncont_p2sp_seeing'])) sncont_1aa_seeing_string = "{0:.2f}".format( float(outputofcalc['sncont_1aa_seeing'])) tsncont_1aa_seeing_string = "{0:.2f}".format( float(outputofcalc['tsncont_1aa_seeing'])) sncont_band_seeing_string = "{0:.2f}".format( float(outputofcalc['sncont_band_seeing'])) tsncont_band_seeing_string = "{0:.2f}".format( float(outputofcalc['tsncont_band_seeing'])) sncont_p2sp_1_string = "{0:.2f}".format( float(outputofcalc['sncont_p2sp_1'])) tsncont_p2sp_1_string = "{0:.2f}".format( float(outputofcalc['tsncont_p2sp_1'])) sncont_1aa_1_string = "{0:.2f}".format( float(outputofcalc['sncont_1aa_1'])) tsncont_1aa_1_string = "{0:.2f}".format( float(outputofcalc['tsncont_1aa_1'])) sncont_band_1_string = "{0:.2f}".format( float(outputofcalc['sncont_band_1'])) tsncont_band_1_string = "{0:.2f}".format( float(outputofcalc['tsncont_band_1'])) sncont_psp_pspp_string = "{0:.2f}".format( float(outputofcalc['sncont_psp_pspp'])) tsncont_psp_pspp_string = "{0:.2f}".format( float(outputofcalc['tsncont_psp_pspp'])) sncont_psp_pspp2_string = "{0:.2f}".format( float(outputofcalc['sncont_psp_pspp'] * 2)) tsncont_psp_pspp2_string = "{0:.2f}".format( float(outputofcalc['tsncont_psp_pspp'] * 2)) lambdaeff_string = str(outputofcalc['lambdaeff']) snline_all_string = "{0:.2f}".format( float(outputofcalc['snline_all'])) tsnline_all_string = "{0:.2f}".format( float(outputofcalc['tsnline_all'])) snline_fibre_string = "{0:.2f}".format( float(outputofcalc['snline_fibre'])) tsnline_fibre_string = "{0:.2f}".format( float(outputofcalc['tsnline_fibre'])) snline_pspp_string = "{0:.2f}".format( float(outputofcalc['snline_pspp'])) tsnline_pspp_string = "{0:.2f}".format( float(outputofcalc['tsnline_pspp'])) snline_1_aa_string = "{0:.2f}".format( float(outputofcalc['snline_1_aa'])) tsnline_1_aa_string = "{0:.2f}".format( float(outputofcalc['tsnline_1_aa'])) snline_seeing_string = str(outputofcalc['snline_seeing']) tsnline_seeing_string = "{0:.2f}".format( float(outputofcalc['tsnline_seeing'])) snline_1_string = str(outputofcalc['snline_1']) tsnline_1_string = "{0:.2f}".format( float(outputofcalc['tsnline_1'])) snline_voxel_string = "{0:.2f}".format( 
float(outputofcalc['snline_voxel'])) tsnline_voxel_string = "{0:.2f}".format( float(outputofcalc['tsnline_voxel'])) snline_fibre1aa_string = "{0:.2f}".format( float(outputofcalc['snline_fibre1aa'])) tsnline_fibre1aa_string = "{0:.2f}".format( float(outputofcalc['tsnline_fibre1aa'])) seeing_centermean_val = outputofcalc['seeing_centermean'] seeing_centermean_string = str(seeing_centermean_val) seeing_ring1mean_val = outputofcalc['seeing_ring1mean'] seeing_ring1mean_string = str(seeing_ring1mean_val) seeing_ring2mean_val = outputofcalc['seeing_ring2mean'] seeing_ring2mean_string = str(seeing_ring2mean_val) seeing_total_val = outputofcalc['seeing_total'] seeing_total_string = str(seeing_total_val) seeing_cr1_val = seeing_centermean_val + seeing_ring1mean_val seeing_cr1_string = "{0:.2f}".format(seeing_cr1_val) seeing_cr1r2_val = seeing_centermean_val + seeing_ring1mean_val + seeing_ring2mean_val seeing_cr1r2_string = "{0:.2f}".format(round(seeing_cr1r2_val)) tsncont_centermean = "{0:.2f}".format( (seeing_centermean_val / 100) * tsncont_p2sp_all_val) tsncont_ring1mean = "{0:.2f}".format( (seeing_ring1mean_val / 100) * tsncont_p2sp_all_val) tsncont_ring2mean = "{0:.2f}".format( (seeing_ring2mean_val / 100) * tsncont_p2sp_all_val) tsncont_total = "{0:.2f}".format( (seeing_total_val / 100) * tsncont_p2sp_all_val) sncont_centerspaxel_voxel_val = outputofcalc[ 'sncont_centerspaxel_voxel'] sncont_centerspaxel_voxel_string = "{0:.2f}".format( float(sncont_centerspaxel_voxel_val)) sncont_r1spaxel_voxel_val = outputofcalc['sncont_r1spaxel_voxel'] sncont_r1spaxel_voxel_string = "{0:.2f}".format( float(sncont_r1spaxel_voxel_val)) sncont_r2spaxel_voxel_val = outputofcalc['sncont_r2spaxel_voxel'] sncont_r2spaxel_voxel_string = "{0:.2f}".format( float(sncont_r2spaxel_voxel_val)) sncont_cr1spaxels_voxel_val = outputofcalc[ 'sncont_cr1spaxels_voxel'] sncont_cr1spaxels_voxel_string = "{0:.2f}".format( float(sncont_cr1spaxels_voxel_val)) sncont_cr1r2spaxels_voxel_val = outputofcalc[ 'sncont_cr1r2spaxels_voxel'] sncont_cr1r2spaxels_voxel_string = "{0:.2f}".format( float(sncont_cr1r2spaxels_voxel_val)) # tsncont_centerspaxel_voxel_val = outputofcalc[ 'tsncont_centerspaxel_voxel'] tsncont_centerspaxel_voxel_string = "{0:.2f}".format( float(tsncont_centerspaxel_voxel_val)) tsncont_cr1spaxels_voxel_val = outputofcalc[ 'tsncont_cr1spaxels_voxel'] tsncont_cr1spaxels_voxel_string = "{0:.2f}".format( float(tsncont_cr1spaxels_voxel_val)) tsncont_cr1r2spaxels_voxel_val = outputofcalc[ 'tsncont_cr1r2spaxels_voxel'] tsncont_cr1r2spaxels_voxel_string = "{0:.2f}".format( float(tsncont_cr1r2spaxels_voxel_val)) sncont_centerspaxel_aa_val = outputofcalc['sncont_centerspaxel_aa'] sncont_centerspaxel_aa_string = "{0:.2f}".format( float(sncont_centerspaxel_aa_val)) sncont_cr1spaxels_aa_val = outputofcalc['sncont_cr1spaxels_aa'] sncont_cr1spaxels_aa_string = "{0:.2f}".format( float(sncont_cr1spaxels_aa_val)) sncont_cr1r2spaxels_aa_val = outputofcalc['sncont_cr1r2spaxels_aa'] sncont_cr1r2spaxels_aa_string = "{0:.2f}".format( float(sncont_cr1r2spaxels_aa_val)) # tsncont_centerspaxel_aa_val = outputofcalc[ 'tsncont_centerspaxel_aa'] tsncont_centerspaxel_aa_string = "{0:.2f}".format( float(tsncont_centerspaxel_aa_val)) tsncont_cr1spaxels_aa_val = outputofcalc['tsncont_cr1spaxels_aa'] tsncont_cr1spaxels_aa_string = "{0:.2f}".format( float(tsncont_cr1spaxels_aa_val)) tsncont_cr1r2spaxels_aa_val = outputofcalc[ 'tsncont_cr1r2spaxels_aa'] tsncont_cr1r2spaxels_aa_string = "{0:.2f}".format( float(tsncont_cr1r2spaxels_aa_val)) 
sncont_centerspaxel_all_val = outputofcalc[ 'sncont_centerspaxel_all'] sncont_centerspaxel_all_string = "{0:.2f}".format( float(sncont_centerspaxel_all_val)) sncont_cr1spaxels_all_val = outputofcalc['sncont_cr1spaxels_all'] sncont_cr1spaxels_all_string = "{0:.2f}".format( float(sncont_cr1spaxels_all_val)) sncont_cr1r2spaxels_all_val = outputofcalc[ 'sncont_cr1r2spaxels_all'] sncont_cr1r2spaxels_all_string = "{0:.2f}".format( float(sncont_cr1r2spaxels_all_val)) # tsncont_centerspaxel_all_val = outputofcalc[ 'tsncont_centerspaxel_all'] tsncont_centerspaxel_all_string = "{0:.2f}".format( float(tsncont_centerspaxel_all_val)) tsncont_cr1spaxels_all_val = outputofcalc['tsncont_cr1spaxels_all'] tsncont_cr1spaxels_all_string = "{0:.2f}".format( float(tsncont_cr1spaxels_all_val)) tsncont_cr1r2spaxels_all_val = outputofcalc[ 'tsncont_cr1r2spaxels_all'] tsncont_cr1r2spaxels_all_string = "{0:.2f}".format( float(tsncont_cr1r2spaxels_all_val)) npixx_p2sp_all_val = outputofcalc['npixx_p2sp_all'] npixx_p2sp_all_string = "{0:.2f}".format(npixx_p2sp_all_val) npixy_p2sp_all_val = outputofcalc['npixy_p2sp_all'] npixy_p2sp_all_string = "{0:.2f}".format(npixy_p2sp_all_val) npixx_1aa_all_val = outputofcalc['npixx_1aa_all'] npixx_1aa_all_string = "{0:.2f}".format(npixx_1aa_all_val) npixy_1aa_all_val = outputofcalc['npixy_1aa_all'] npixy_1aa_all_string = "{0:.2f}".format(npixy_1aa_all_val) npixx_band_all_val = outputofcalc['npixx_band_all'] npixx_band_all_string = "{0:.2f}".format(npixx_band_all_val) npixy_band_all_val = outputofcalc['npixy_band_all'] npixy_band_all_string = "{0:.2f}".format(npixy_band_all_val) npixx_p2sp_fibre_val = outputofcalc['npixx_p2sp_fibre'] npixx_p2sp_fibre_string = "{0:.2f}".format(npixx_p2sp_fibre_val) npixy_p2sp_fibre_val = outputofcalc['npixy_p2sp_fibre'] npixy_p2sp_fibre_string = "{0:.2f}".format(npixy_p2sp_fibre_val) npixx_1aa_fibre_val = outputofcalc['npixx_1aa_fibre'] npixx_1aa_fibre_string = "{0:.2f}".format(npixx_1aa_fibre_val) npixy_1aa_fibre_val = outputofcalc['npixy_1aa_fibre'] npixy_1aa_fibre_string = "{0:.2f}".format(npixy_1aa_fibre_val) npixx_band_fibre_val = outputofcalc['npixx_band_fibre'] npixx_band_fibre_string = "{0:.2f}".format(npixx_band_fibre_val) npixy_band_fibre_val = outputofcalc['npixy_band_fibre'] npixy_band_fibre_string = "{0:.2f}".format(npixy_band_fibre_val) npixx_p2sp_seeing_val = outputofcalc['npixx_p2sp_seeing'] npixx_p2sp_seeing_string = "{0:.2f}".format(npixx_p2sp_seeing_val) npixy_p2sp_seeing_val = outputofcalc['npixy_p2sp_seeing'] npixy_p2sp_seeing_string = "{0:.2f}".format(npixy_p2sp_seeing_val) npixx_1aa_seeing_val = outputofcalc['npixx_1aa_seeing'] npixx_1aa_seeing_string = "{0:.2f}".format(npixx_1aa_seeing_val) npixy_1aa_seeing_val = outputofcalc['npixy_1aa_seeing'] npixy_1aa_seeing_string = "{0:.2f}".format(npixy_1aa_seeing_val) npixx_band_seeing_val = outputofcalc['npixx_band_seeing'] npixx_band_seeing_string = "{0:.2f}".format(npixx_band_seeing_val) npixy_band_seeing_val = outputofcalc['npixy_band_seeing'] npixy_band_seeing_string = "{0:.2f}".format(npixy_band_seeing_val) npixx_p2sp_1_val = outputofcalc['npixx_p2sp_1'] npixx_p2sp_1_string = "{0:.2f}".format(npixx_p2sp_1_val) npixy_p2sp_1_val = outputofcalc['npixy_p2sp_1'] npixy_p2sp_1_string = "{0:.2f}".format(npixy_p2sp_1_val) npixx_1aa_1_val = outputofcalc['npixx_1aa_1'] npixx_1aa_1_string = "{0:.2f}".format(npixx_1aa_1_val) npixy_1aa_1_val = outputofcalc['npixy_1aa_1'] npixy_1aa_1_string = "{0:.2f}".format(npixy_1aa_1_val) npixx_band_1_val = outputofcalc['npixx_band_1'] npixx_band_1_string 
= "{0:.2f}".format(npixx_band_1_val) npixy_band_1_val = outputofcalc['npixy_band_1'] npixy_band_1_string = "{0:.2f}".format(npixy_band_1_val) npixx_psp_pspp_val = outputofcalc['npixx_psp_pspp'] npixx_psp_pspp_string = "{0:.2f}".format(npixx_psp_pspp_val) npixy_psp_pspp_val = outputofcalc['npixy_psp_pspp'] npixy_psp_pspp_string = "{0:.2f}".format(npixy_psp_pspp_val) sncont_pdp_fibre_val = outputofcalc['sncont_pdp_fibre'] sncont_pdp_fibre_string = "{0:.2f}".format( float(sncont_pdp_fibre_val)) tsncont_pdp_fibre_val = outputofcalc['tsncont_pdp_fibre'] tsncont_pdp_fibre_string = "{0:.2f}".format( float(tsncont_pdp_fibre_val)) npixx_pdp_fibre_val = outputofcalc['npixx_pdp_fibre'] npixx_pdp_fibre_string = "{0:.2f}".format( float(npixx_pdp_fibre_val)) npixy_pdp_fibre_val = outputofcalc['npixy_pdp_fibre'] npixy_pdp_fibre_string = "{0:.2f}".format( float(npixy_pdp_fibre_val)) sncont_psp_fibre_val = outputofcalc['sncont_psp_fibre'] sncont_psp_fibre_string = "{0:.2f}".format( float(sncont_psp_fibre_val)) tsncont_psp_fibre_val = outputofcalc['tsncont_psp_fibre'] tsncont_psp_fibre_string = "{0:.2f}".format( float(tsncont_psp_fibre_val)) npixx_psp_fibre_val = outputofcalc['npixx_psp_fibre'] npixx_psp_fibre_string = "{0:.2f}".format( float(npixx_psp_fibre_val)) npixy_psp_fibre_val = outputofcalc['npixy_psp_fibre'] npixy_psp_fibre_string = "{0:.2f}".format( float(npixy_psp_fibre_val)) sncont_psp_all_val = outputofcalc['sncont_psp_all'] sncont_psp_all_string = "{0:.2f}".format(float(sncont_psp_all_val)) tsncont_psp_all_val = outputofcalc['tsncont_psp_all'] tsncont_psp_all_string = "{0:.2f}".format( float(tsncont_psp_all_val)) npixx_psp_all_val = outputofcalc['npixx_psp_all'] npixx_psp_all_string = "{0:.2f}".format(float(npixx_psp_all_val)) npixy_psp_all_val = outputofcalc['npixy_psp_all'] npixy_psp_all_string = "{0:.2f}".format(float(npixy_psp_all_val)) cmode = outputofcalc['cmode_val'] snr_val = outputofcalc['snr_val'] snr_string = "{0:.2f}".format(snr_val) print "SNR = ", snr_string # snrpframe_val = snr_val / numpy.sqrt(numframe_val) snrpframe_val = outputofcalc['snrpframe_val'] snrpframe_string = "{0:.2f}".format(snrpframe_val) etpframe_c_voxel_val = outputofcalc['etpframe_c_voxel_val'] if etpframe_c_voxel_val < 1: etpframe_c_voxel_string = "< 1" # etpframe_c_voxel_string = "{0:.2f}".format(etpframe_c_voxel_val) else: etpframe_c_voxel_string = "{0:.0f}".format(etpframe_c_voxel_val) etallframe_c_voxel_val = outputofcalc['etallframe_c_voxel_val'] if etallframe_c_voxel_val < 1: etallframe_c_voxel_string = "< 1" # etallframe_c_voxel_string = "{0:.2f}".format(etallframe_c_voxel_val) else: etallframe_c_voxel_string = "{0:.0f}".format(etallframe_c_voxel_val) etpframe_cr1_voxel_val = outputofcalc['etpframe_cr1_voxel_val'] if etpframe_cr1_voxel_val < 1: etpframe_cr1_voxel_string = "< 1" # etpframe_cr1_voxel_string = "{0:.2f}".format(etpframe_cr1_voxel_val) else: etpframe_cr1_voxel_string = "{0:.0f}".format(etpframe_cr1_voxel_val) etallframe_cr1_voxel_val = outputofcalc['etallframe_cr1_voxel_val'] if etallframe_cr1_voxel_val < 1: etallframe_cr1_voxel_string = "< 1" # etallframe_cr1_voxel_string = "{0:.2f}".format(etallframe_cr1_voxel_val) else: etallframe_cr1_voxel_string = "{0:.0f}".format(etallframe_cr1_voxel_val) etpframe_cr1r2_voxel_val = outputofcalc['etpframe_cr1r2_voxel_val'] if etpframe_cr1r2_voxel_val < 1: etpframe_cr1r2_voxel_string = "< 1" # etpframe_cr1r2_voxel_string = "{0:.2f}".format(etpframe_cr1r2_voxel_val) else: etpframe_cr1r2_voxel_string = "{0:.0f}".format(etpframe_cr1r2_voxel_val) 
etallframe_cr1r2_voxel_val = outputofcalc['etallframe_cr1r2_voxel_val'] if etallframe_cr1r2_voxel_val < 1: etallframe_cr1r2_voxel_string = "< 1" # etallframe_cr1r2_voxel_string = "{0:.2f}".format(etallframe_cr1r2_voxel_val) else: etallframe_cr1r2_voxel_string = "{0:.0f}".format(etallframe_cr1r2_voxel_val) etpframe_c_aa_val = outputofcalc['etpframe_c_aa_val'] if etpframe_c_aa_val < 1: etpframe_c_aa_string = "< 1" # etpframe_c_aa_string = "{0:.2f}".format(etpframe_c_aa_val) else: etpframe_c_aa_string = "{0:.0f}".format(etpframe_c_aa_val) etallframe_c_aa_val = outputofcalc['etallframe_c_aa_val'] if etallframe_c_aa_val < 1: etallframe_c_aa_string = "< 1" # etallframe_c_aa_string = "{0:.2f}".format(etallframe_c_aa_val) else: etallframe_c_aa_string = "{0:.0f}".format(etallframe_c_aa_val) etpframe_cr1_aa_val = outputofcalc['etpframe_cr1_aa_val'] if etpframe_cr1_aa_val < 1: etpframe_cr1_aa_string = "< 1" # etpframe_cr1_aa_string = "{0:.2f}".format(etpframe_cr1_aa_val) else: etpframe_cr1_aa_string = "{0:.0f}".format(etpframe_cr1_aa_val) etallframe_cr1_aa_val = outputofcalc['etallframe_cr1_aa_val'] if etallframe_cr1_aa_val < 1: etallframe_cr1_aa_string = "< 1" # etallframe_cr1_aa_string = "{0:.2f}".format(etallframe_cr1_aa_val) else: etallframe_cr1_aa_string = "{0:.0f}".format(etallframe_cr1_aa_val) etpframe_cr1r2_aa_val = outputofcalc['etpframe_cr1r2_aa_val'] if etpframe_cr1r2_aa_val < 1: etpframe_cr1r2_aa_string = "< 1" # etpframe_cr1r2_aa_string = "{0:.2f}".format(etpframe_cr1r2_aa_val) else: etpframe_cr1r2_aa_string = "{0:.0f}".format(etpframe_cr1r2_aa_val) etallframe_cr1r2_aa_val = outputofcalc['etallframe_cr1r2_aa_val'] if etallframe_cr1r2_aa_val <1: etallframe_cr1r2_aa_string = "< 1" # etallframe_cr1r2_aa_string = "{0:.2f}".format(etallframe_cr1r2_aa_val) else: etallframe_cr1r2_aa_string = "{0:.0f}".format(etallframe_cr1r2_aa_val) etpframe_c_all_val = outputofcalc['etpframe_c_all_val'] if etpframe_c_all_val < 1: etpframe_c_all_string = "< 1" # etpframe_c_all_string = "{0:.2f}".format(etpframe_c_all_val) else: etpframe_c_all_string = "{0:.0f}".format(etpframe_c_all_val) etallframe_c_all_val = outputofcalc['etallframe_c_all_val'] if etallframe_c_all_val < 1: etallframe_c_all_string = "< 1" # etallframe_c_all_string = "{0:.2f}".format(etallframe_c_all_val) else: etallframe_c_all_string = "{0:.0f}".format(etallframe_c_all_val) etpframe_cr1_all_val = outputofcalc['etpframe_cr1_all_val'] if etpframe_cr1_all_val < 1: etpframe_cr1_all_string = "< 1" # etpframe_cr1_all_string = "{0:.2f}".format(etpframe_cr1_all_val) else: etpframe_cr1_all_string = "{0:.0f}".format(etpframe_cr1_all_val) etallframe_cr1_all_val = outputofcalc['etallframe_cr1_all_val'] if etallframe_cr1_all_val < 1: etallframe_cr1_all_string = "< 1" # etallframe_cr1_all_string = "{0:.2f}".format(etallframe_cr1_all_val) else: etallframe_cr1_all_string = "{0:.0f}".format(etallframe_cr1_all_val) etpframe_cr1r2_all_val = outputofcalc['etpframe_cr1r2_all_val'] if etpframe_cr1r2_all_val < 1: etpframe_cr1r2_all_string = "< 1" # etpframe_cr1r2_all_string = "{0:.2f}".format(etpframe_cr1r2_all_val) else: etpframe_cr1r2_all_string = "{0:.0f}".format(etpframe_cr1r2_all_val) etallframe_cr1r2_all_val = outputofcalc['etallframe_cr1r2_all_val'] if etallframe_cr1r2_all_val < 1: etallframe_cr1r2_all_string = "< 1" # etallframe_cr1r2_all_string = "{0:.2f}".format(etallframe_cr1r2_all_val) else: etallframe_cr1r2_all_string = "{0:.0f}".format(etallframe_cr1r2_all_val) # etallframe_c_voxel_val = outputofcalc['etallframe_c_voxel_val'] # 
etallframe_c_voxel_string = "{0:.0f}".format(etallframe_c_voxel_val) # etpframe_cr1_voxel_val = outputofcalc['etpframe_cr1_voxel_val'] # etpframe_cr1_voxel_string = "{0:.0f}".format(etpframe_cr1_voxel_val) # etallframe_cr1_voxel_val = outputofcalc['etallframe_cr1_voxel_val'] # etallframe_cr1_voxel_string = "{0:.0f}".format(etallframe_cr1_voxel_val) # etpframe_cr1r2_voxel_val = outputofcalc['etpframe_cr1r2_voxel_val'] # etpframe_cr1r2_voxel_string = "{0:.0f}".format(etpframe_cr1r2_voxel_val) # etallframe_cr1r2_voxel_val = outputofcalc['etallframe_cr1r2_voxel_val'] # etallframe_cr1r2_voxel_string = "{0:.0f}".format(etallframe_cr1r2_voxel_val) # # etpframe_c_aa_val = outputofcalc['etpframe_c_aa_val'] # etpframe_c_aa_string = "{0:.0f}".format(etpframe_c_aa_val) # etallframe_c_aa_val = outputofcalc['etallframe_c_aa_val'] # etallframe_c_aa_string = "{0:.0f}".format(etallframe_c_aa_val) # etpframe_cr1_aa_val = outputofcalc['etpframe_cr1_aa_val'] # etpframe_cr1_aa_string = "{0:.0f}".format(etpframe_cr1_aa_val) # etallframe_cr1_aa_val = outputofcalc['etallframe_cr1_aa_val'] # etallframe_cr1_aa_string = "{0:.0f}".format(etallframe_cr1_aa_val) # etpframe_cr1r2_aa_val = outputofcalc['etpframe_cr1r2_aa_val'] # etpframe_cr1r2_aa_string = "{0:.0f}".format(etpframe_cr1r2_aa_val) # etallframe_cr1r2_aa_val = outputofcalc['etallframe_cr1r2_aa_val'] # etallframe_cr1r2_aa_string = "{0:.0f}".format(etallframe_cr1r2_aa_val) # etpframe_c_all_val = outputofcalc['etpframe_c_all_val'] # etpframe_c_all_string = "{0:.0f}".format(etpframe_c_all_val) # etallframe_c_all_val = outputofcalc['etallframe_c_all_val'] # etallframe_c_all_string = "{0:.0f}".format(etallframe_c_all_val) # etpframe_cr1_all_val = outputofcalc['etpframe_cr1_all_val'] # etpframe_cr1_all_string = "{0:.0f}".format(etpframe_cr1_all_val) # etallframe_cr1_all_val = outputofcalc['etallframe_cr1_all_val'] # etallframe_cr1_all_string = "{0:.0f}".format(etallframe_cr1_all_val) # etpframe_cr1r2_all_val = outputofcalc['etpframe_cr1r2_all_val'] # etpframe_cr1r2_all_string = "{0:.0f}".format(etpframe_cr1r2_all_val) # etallframe_cr1r2_all_val = outputofcalc['etallframe_cr1r2_all_val'] # etallframe_cr1r2_all_string = "{0:.0f}".format(etallframe_cr1r2_all_val) if cmode == 'S': cmode_string = 'SNR to exposure time' elif cmode == 'T': cmode_string = 'Exposure time to SNR' outhead1string = '<hr /><span class="boldlarge">Calculation Mode: ' + cmode_string + '<br>Observing Mode: ' + om_val_string + ', VPH: ' + vph_val_string + ', Source Type: ' + sourcet_val_string + ' </span>' + \ '<br /><span class="italicsmall"> Computation time: ' + "{0:.1f}".format((time.time() - start_time)) + ' seconds; </span>' tablecoutstring = '' if fluxt_val_string == 'L': from output_tablelout import tablelout tableloutstring = tablelout(fluxt_val_string, wline_val_string, snline_pspp_string, tsnline_pspp_string, snline_voxel_string, tsnline_voxel_string, snline_fibre1aa_string, tsnline_fibre1aa_string, snline_fibre_string, tsnline_fibre_string, snline_all_string, tsnline_all_string) else: tableloutstring = '<hr />' + \ 'No line input<br /><br />' if sourcet_val_string == 'Point': inputfluxstring = '<tr><td>Input flux:</td><td>' + netflux_string + ' erg/s/cm$^{2}$/$\mathrm{\mathring A}$</td></tr>' resultfluxstring = '<tr><td>Continuum flux per arcsec$^{2}$ <br />within the seeing disk<br />(@$\lambda_{C}$(VPH)):</td><td>' + contatlambdacvph_string + ' erg/s/cm$^{2}$/$\mathrm{\mathring A}$/arcsec$^{2}$</td></tr>' else: inputfluxstring = '<tr><td>Input flux:</td><td>' + netflux_string + ' 
erg/s/cm$^{2}$/$\mathrm{\mathring A}$/arcsec$^{2}$</td></tr>' resultfluxstring = '' if om_val_string == 'MOS': switchstring = 'Sky-bundles:' elif om_val_string == 'LCB': switchstring = 'Sky-fibers:' if fluxt_val_string == 'L': addlinestring = '<tr><td>Resolved line?:</td><td>' + resolvedline_val_string + '</td></tr>' + \ '<tr><td>Line wavelength:</td><td>' + wline_val_string + ' AA</td></tr>' + \ '<tr><td>Line flux (integrated):</td><td>' + fline_val_string + ' erg/s/cm$^{2}$</td></tr>' + \ '<tr><td>Line FWHM:</td><td>' + fwhmline_val_string + ' AA</td></tr>' elif fluxt_val_string == 'C': addlinestring = '' if cmode == 'T': addstring0 = '<tr><td>Exptime per frame:</td><td>' + exptimepframe_val_string + ' s</td></tr>' addstringT = '<tr><td>Total exptime:</td><td>' + exptime_val_string + ' s</td></tr>' addstringS = '' elif cmode == 'S': addstring0 = '' addstringT = '' addstringS = '<tr><td>Total SNR:</td><td>' + snr_string + '</td></tr>' tableinputstring = '<table border=1>' + \ '<tr><td>INPUT PARAMETERS:</td><td></td></tr>' + \ '<tr><td>Calculation mode:</td><td>' + cmode_string + '</td></tr>' + \ '<tr><td>Source type:</td><td>' + sourcet_val_string + '</td></tr>' + \ '<tr><td>Area:</td><td>' + size_val_string + ' arcsec$^{2}$</td></tr>' + \ '<tr><td>Observing mode:</td><td>' + om_val_string + '</td></tr>' + \ '<tr><td>VPH:</td><td>' + vph_val_string + '</td></tr>' + \ '<tr><td>Input flux type:</td><td>' + fluxt_name_string + '</td></tr>' + \ '<tr><td>Source spectrum:</td><td>' + spect_val_string + '</td></tr>' + \ '<tr><td>Input continuum:</td><td>' + bandc_val_string + ' = ' + mag_val_string + 'mag</td></tr>' + \ inputfluxstring + \ resultfluxstring + \ addlinestring + \ '<tr><td>*Sky Condition:</td><td>' + skycond_val_string + '</td></tr>' + \ '<tr><td>Moon:</td><td>' + moon_val_string + '</td></tr>' + \ '<tr><td>Airmass: X=</td><td>' + airmass_val_string + '</td></tr>' + \ '<tr><td>Seeing(@X=1):</td><td>' + seeing_zenith_string + ' arcsec</td></tr>' + \ '<tr><td>Sky-flux(R,@X):</td><td>' + fsky_string + ' erg/s/cm$^{2}$/$\mathrm{\mathring A}$/arcsec$^{2}$</td></tr>' + \ '<tr><td>Seeing(@X):</td><td>' + seeingx_string + ' arcsec</td></tr>' + \ '<tr><td>*Observation:</td></td><td></tr>' + \ '<tr><td>Number of frames:</td><td>' + numframe_val_string + '</td></tr>' + \ addstring0 + \ addstringT + \ addstringS + \ '<tr><td>NP_Dark:</td><td>' + npdark_val_string + '</td></tr>' + \ '<tr><td>' + switchstring + '</td><td>' + nsbundles_val_string + '</td></tr>' + \ '</table><br />' # NOT USED BUT KEEP FOR TESTING tablecalcstring = '<br /><br />' + \ '<p id="mathid">Details of calculations (TEST):<br />' + \ '$$\\textrm{Radius of source, } R_{source} = ' + radius_val_string + '\\textrm{ arcsec}$$' + \ '$$\\textrm{Area of source, } \Omega_{source} = \pi \\times ' + radius_val_string + '^{2} = ' + size_val_string + '\\textrm{ arcsec}^{2}$$' + \ '$$\\textrm{Radius of one fiber, } R_{fiber} = 0.31 \\textrm{ arcsec}$$' + \ '$$\\textrm{Area of one fiber, } \Omega_{fiber} = 3\sqrt{3} \left( \\frac{R_{fiber}^{2}}{2} \\right)$$' + \ '$$\\textrm{Number of fibers used to measure sky} = \\frac{\Omega_{source}}{\Omega_{fiber}} = ' + nfibres_string + '$$' + \ '$$\\textrm{Area in which sky has been measured, } \Omega_{sky} = ' + nfibres_string + '\\times \Omega_{fiber}$$' + \ '</p>' + \ '<br /><br />' # print om_val_string # print 'PID = ', os.getpid() # NEW OUTPUT CONTINUUM TABLE FOR MOS AND LCB if cmode == 'T': if om_val_string == 'MOS' and sourcet_val_string == 'Point': from output_table_MOS_P_T import 
tablenewpsfMPT tablenewpsfstring = tablenewpsfMPT(lambdaeff_string, seeingx_string, exptimepframe_val_string, exptime_val_string, numframe_val_string, seeing_centermean_string, seeing_cr1_string, sncont_centerspaxel_voxel_string, sncont_cr1spaxels_voxel_string, tsncont_centerspaxel_voxel_string, tsncont_cr1spaxels_voxel_string, sncont_centerspaxel_aa_string, sncont_cr1spaxels_aa_string, tsncont_centerspaxel_aa_string, tsncont_cr1spaxels_aa_string, sncont_centerspaxel_all_string, sncont_cr1spaxels_all_string, tsncont_centerspaxel_all_string, tsncont_cr1spaxels_all_string ) elif om_val_string == 'MOS' and sourcet_val_string == 'Extended': from output_table_MOS_E_T import tablenewpsfMET tablenewpsfstring = tablenewpsfMET(lambdaeff_string, seeingx_string, exptimepframe_val_string, exptime_val_string, numframe_val_string, nfibres_string, sncont_centerspaxel_voxel_string, sncont_cr1spaxels_voxel_string, sncont_pspfwhm_all_string, tsncont_centerspaxel_voxel_string, tsncont_cr1spaxels_voxel_string, tsncont_pspfwhm_all_string, sncont_centerspaxel_aa_string, sncont_cr1spaxels_aa_string, sncont_1aa_all_string, tsncont_centerspaxel_aa_string, tsncont_cr1spaxels_aa_string, tsncont_1aa_all_string, sncont_centerspaxel_all_string, sncont_cr1spaxels_all_string, sncont_band_all_string, tsncont_centerspaxel_all_string, tsncont_cr1spaxels_all_string, tsncont_band_all_string) elif om_val_string == 'LCB' and sourcet_val_string == 'Point': from output_table_LCB_P_T import tablenewpsfLPT tablenewpsfstring = tablenewpsfLPT(lambdaeff_string, seeingx_string, exptimepframe_val_string, exptime_val_string, numframe_val_string, seeing_centermean_string, seeing_cr1_string, seeing_cr1r2_string, sncont_centerspaxel_voxel_string, sncont_cr1spaxels_voxel_string, sncont_cr1r2spaxels_voxel_string, tsncont_centerspaxel_voxel_string, tsncont_cr1spaxels_voxel_string, tsncont_cr1r2spaxels_voxel_string, sncont_centerspaxel_aa_string, sncont_cr1spaxels_aa_string, sncont_cr1r2spaxels_aa_string, tsncont_centerspaxel_aa_string, tsncont_cr1spaxels_aa_string, tsncont_cr1r2spaxels_aa_string, sncont_centerspaxel_all_string, sncont_cr1spaxels_all_string, sncont_cr1r2spaxels_all_string, tsncont_centerspaxel_all_string, tsncont_cr1spaxels_all_string, tsncont_cr1r2spaxels_all_string) elif om_val_string == 'LCB' and sourcet_val_string == 'Extended': from output_table_LCB_E_T import tablenewpsfLET tablenewpsfstring = tablenewpsfLET(lambdaeff_string, seeingx_string, exptimepframe_val_string, exptime_val_string, numframe_val_string, sncont_centerspaxel_voxel_string, sncont_cr1spaxels_voxel_string, sncont_cr1r2spaxels_voxel_string, tsncont_centerspaxel_voxel_string, tsncont_cr1spaxels_voxel_string, tsncont_cr1r2spaxels_voxel_string, sncont_centerspaxel_aa_string, sncont_cr1spaxels_aa_string, sncont_cr1r2spaxels_aa_string, tsncont_centerspaxel_aa_string, tsncont_cr1spaxels_aa_string, tsncont_cr1r2spaxels_aa_string, sncont_centerspaxel_all_string, sncont_cr1spaxels_all_string, sncont_cr1r2spaxels_all_string, tsncont_centerspaxel_all_string, tsncont_cr1spaxels_all_string, tsncont_cr1r2spaxels_all_string) else: tablenewpsfstring = '' elif cmode == 'S': if om_val_string == 'MOS' and sourcet_val_string == 'Point': from output_table_MOS_P_S import tablenewpsfMPS tablenewpsfstring = tablenewpsfMPS(snr_string, numframe_val_string, snrpframe_string, exptimepframe_val_string, exptime_val_string, etpframe_c_voxel_string, etallframe_c_voxel_string, etpframe_cr1_voxel_string, etallframe_cr1_voxel_string, etpframe_cr1r2_voxel_string, 
etallframe_cr1r2_voxel_string, etpframe_c_aa_string, etallframe_c_aa_string, etpframe_cr1_aa_string, etallframe_cr1_aa_string, etpframe_cr1r2_aa_string, etallframe_cr1r2_aa_string, etpframe_c_all_string, etallframe_c_all_string, etpframe_cr1_all_string, etallframe_cr1_all_string, etpframe_cr1r2_all_string, etallframe_cr1r2_all_string, lambdaeff_string, seeingx_string, sncont_centerspaxel_voxel_string, sncont_cr1spaxels_voxel_string, sncont_cr1r2spaxels_voxel_string, tsncont_centerspaxel_voxel_string, tsncont_cr1spaxels_voxel_string, tsncont_cr1r2spaxels_voxel_string, sncont_centerspaxel_aa_string, sncont_cr1spaxels_aa_string, sncont_cr1r2spaxels_aa_string, tsncont_centerspaxel_aa_string, tsncont_cr1spaxels_aa_string, tsncont_cr1r2spaxels_aa_string, sncont_centerspaxel_all_string, sncont_cr1spaxels_all_string, ) elif om_val_string == 'MOS' and sourcet_val_string == 'Extended': from output_table_MOS_E_S import tablenewpsfMES tablenewpsfstring = tablenewpsfMES(snr_string, numframe_val_string, snrpframe_string, exptimepframe_val_string, exptime_val_string, etpframe_c_voxel_string, etallframe_c_voxel_string, etpframe_cr1_voxel_string, etallframe_cr1_voxel_string, etpframe_cr1r2_voxel_string, etallframe_cr1r2_voxel_string, etpframe_c_aa_string, etallframe_c_aa_string, etpframe_cr1_aa_string, etallframe_cr1_aa_string, etpframe_cr1r2_aa_string, etallframe_cr1r2_aa_string, etpframe_c_all_string, etallframe_c_all_string, etpframe_cr1_all_string, etallframe_cr1_all_string, etpframe_cr1r2_all_string, etallframe_cr1r2_all_string, lambdaeff_string, seeingx_string, nfibres_string, sncont_centerspaxel_voxel_string, sncont_cr1spaxels_voxel_string, sncont_pspfwhm_all_string, tsncont_centerspaxel_voxel_string, tsncont_cr1spaxels_voxel_string, tsncont_pspfwhm_all_string, sncont_centerspaxel_aa_string, sncont_cr1spaxels_aa_string, sncont_1aa_all_string, tsncont_centerspaxel_aa_string, tsncont_cr1spaxels_aa_string, tsncont_1aa_all_string, sncont_centerspaxel_all_string, sncont_cr1spaxels_all_string, sncont_band_all_string, tsncont_centerspaxel_all_string, tsncont_cr1spaxels_all_string, tsncont_band_all_string, ) elif om_val_string == 'LCB' and sourcet_val_string == 'Extended': from output_table_LCB_E_S import tablenewpsfLES tablenewpsfstring = tablenewpsfLES(snr_string, numframe_val_string, snrpframe_string, exptimepframe_val_string, exptime_val_string, etpframe_c_voxel_string, etallframe_c_voxel_string, etpframe_cr1_voxel_string, etallframe_cr1_voxel_string, etpframe_cr1r2_voxel_string, etallframe_cr1r2_voxel_string, etpframe_c_aa_string, etallframe_c_aa_string, etpframe_cr1_aa_string, etallframe_cr1_aa_string, etpframe_cr1r2_aa_string, etallframe_cr1r2_aa_string, etpframe_c_all_string, etallframe_c_all_string, etpframe_cr1_all_string, etallframe_cr1_all_string, etpframe_cr1r2_all_string, etallframe_cr1r2_all_string, lambdaeff_string, seeingx_string, sncont_centerspaxel_voxel_string, sncont_cr1spaxels_voxel_string, sncont_cr1r2spaxels_voxel_string, tsncont_centerspaxel_voxel_string, tsncont_cr1spaxels_voxel_string, tsncont_cr1r2spaxels_voxel_string, sncont_centerspaxel_aa_string, sncont_cr1spaxels_aa_string, sncont_cr1r2spaxels_aa_string, tsncont_centerspaxel_aa_string, tsncont_cr1spaxels_aa_string, tsncont_cr1r2spaxels_aa_string, sncont_centerspaxel_all_string, sncont_cr1spaxels_all_string, sncont_cr1r2spaxels_all_string, tsncont_centerspaxel_all_string, tsncont_cr1spaxels_all_string, tsncont_cr1r2spaxels_all_string) elif om_val_string == 'LCB' and sourcet_val_string == 'Point': from output_table_LCB_P_S 
import tablenewpsfLPS tablenewpsfstring = tablenewpsfLPS(snr_string, numframe_val_string, snrpframe_string, exptimepframe_val_string, exptime_val_string, etpframe_c_voxel_string, etallframe_c_voxel_string, etpframe_cr1_voxel_string, etallframe_cr1_voxel_string, etpframe_cr1r2_voxel_string, etallframe_cr1r2_voxel_string, etpframe_c_aa_string, etallframe_c_aa_string, etpframe_cr1_aa_string, etallframe_cr1_aa_string, etpframe_cr1r2_aa_string, etallframe_cr1r2_aa_string, etpframe_c_all_string, etallframe_c_all_string, etpframe_cr1_all_string, etallframe_cr1_all_string, etpframe_cr1r2_all_string, etallframe_cr1r2_all_string, lambdaeff_string, seeingx_string, sncont_centerspaxel_voxel_string, sncont_cr1spaxels_voxel_string, sncont_cr1r2spaxels_voxel_string, tsncont_centerspaxel_voxel_string, tsncont_cr1spaxels_voxel_string, tsncont_cr1r2spaxels_voxel_string, sncont_centerspaxel_aa_string, sncont_cr1spaxels_aa_string, sncont_cr1r2spaxels_aa_string, tsncont_centerspaxel_aa_string, tsncont_cr1spaxels_aa_string, tsncont_cr1r2spaxels_aa_string, sncont_centerspaxel_all_string, sncont_cr1spaxels_all_string, sncont_cr1r2spaxels_all_string, tsncont_centerspaxel_all_string, tsncont_cr1spaxels_all_string, tsncont_cr1r2spaxels_all_string) else: tablenewpsfstring = '' # NOT WORKING YET SO SETTING TO EMPTY tablenewpsflinestring = '' # FOR DOWNLOADABLE FILES forfileoutputstring = outputofcalc['forfileoutput'] forfileoutput2string = outputofcalc['forfileoutput2'] forfileoutput3string = outputofcalc['forfileoutput3'] # EXTRA FOOTER footerstring = '<span class="italicsmall">Time of request: ' + start_time_string + ' ; End of request: ' + time.strftime( "%H:%M:%S") + '</span>' else: outhead1string = '' tablecoutstring = '' tableloutstring = '' tableinputstring = '' tablecalcstring = '' tablenewpsfstring = '' tablenewpsflinestring = '' forfileoutputstring = '' forfileoutput2string = '' forfileoutput3string = '' footerstring = '' print "### LOG: ABOUT TO QUIT ETC_DO; JSON OUTPUT TO JAVASCRIPT." from django.http import JsonResponse return JsonResponse({'outtext': outtextstring, 'textinput': inputstring, 'textcout': coutputstring, 'textlout': loutputstring, 'textcalc': textcalcstring, 'tablecout': tablecoutstring, 'tablelout': tableloutstring, 'tableinput': tableinputstring, 'tablecalc': tablecalcstring, 'tablenewpsf': tablenewpsfstring, 'tablenewpsfline': tablenewpsflinestring, 'thescript': thescript, 'thediv': thediv, 'outhead1': outhead1string, 'forfile': forfileoutputstring, 'forfile2': forfileoutput2string, 'forfile3': forfileoutput3string, 'footerstring': footerstring, })
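The input-parameter table above is assembled by concatenating one '<tr>' string per parameter by hand, which is easy to get wrong when rows are added, reordered, or made conditional. As a minimal sketch of the same pattern expressed as data, the helper below builds a table from (label, value) pairs; it is illustrative only and is not code from the calculator itself — the variables used in the example (cmode_string, om_val_string, vph_val_string) are the ones defined earlier in the view.

# Sketch only: the real view concatenates the '<tr>' strings directly.
def html_table(rows):
    """Render a simple two-column HTML table from (label, value) pairs."""
    body = ''.join('<tr><td>%s</td><td>%s</td></tr>' % (label, value)
                   for label, value in rows)
    return '<table border=1>' + body + '</table><br />'

# Example mirroring a few of the rows built above; conditional rows
# (addstring0, addstringT, addstringS) become list items that are
# appended or skipped instead of pre-built strings.
example_table = html_table([
    ('INPUT PARAMETERS:', ''),
    ('Calculation mode:', cmode_string),
    ('Observing mode:', om_val_string),
    ('VPH:', vph_val_string),
])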
gpl-3.0
alberlab/alabtools
alabtools/utils.py
1
53227
# Copyright (C) 2017 University of Southern California and # Nan Hua # # Authors: Nan Hua, Guido Polles # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import division, print_function __author__ = "Nan Hua and Guido Polles" __license__ = "GPL" __version__ = "0.0.3" __email__ = "nhua@usc.edu" import os import re import math import numpy as np import warnings import subprocess import itertools import h5py import json import scipy import sys import collections import pandas as pd import scipy.sparse.linalg from six import string_types if sys.version_info > (3, 0): # python 3.x def unicode(s): if isinstance(s, bytes): return s.decode() return s from io import StringIO else: # python 2.x from cStringIO import StringIO unicode = unicode CHROMS_DTYPE = np.dtype('U10') ORIGINS_DTYPE = np.int32 LENGTHS_DTYPE = np.int32 CHROM_DTYPE = np.int32 START_DTYPE = np.int32 END_DTYPE = np.int32 COPY_DTYPE = np.int32 LABEL_DTYPE = np.dtype('U10') CHROM_SIZES_DTYPE = np.int32 COORD_DTYPE = np.float32 RADII_DTYPE = np.float32 # size of buckets (in basepairs) for fast search in index (see index.loc function) BUCKET_SIZE = 1000000 class Genome(object): """ And instance which holds genome information Parameters ---------- assembly : str The name of the genome. e.g. "hg19" or "mm9" chroms : array[str] like, optional List of chromosome names. e.g ['chr1','chr2' ... 'chrX'] origins : array[int] like, optional List of chr region start, default will be arry of zeros lengths : array[int] like, optional List of chr region length. e.g [249250621, 243199373, ...] usechr : array[str] like, optional Specified chromsome to use. e.g. ['#','X'] or ['1','2','3'..'10']\ '#' indicates all autosome chromosomes. Notes ----- If ``chroms`` or ``lengths`` is(are) not specified, genome info will be read from genomes/*.info file Attributes ---------- assembly : str Name of genome chroms : np.array[string10] chromosome name origins : np.array[int32] chromosome region start lengths : np.array[int32] chromosome region length """ def __init__(self, assembly, chroms=None, origins=None, lengths=None, usechr=None, silence=False): # If the first argument is a string, and no other info is specified, # check if we can read from info or hdf5. # Genome assembly name has precedence over hdf5 file names. # Will raise a IOError if cannot read any. 
if isinstance(assembly, string_types) and ( (chroms is None) or (lengths is None) ): datafile = os.path.join( os.path.dirname(os.path.abspath(__file__)), "genomes/" + assembly + ".info" ) if os.path.isfile(datafile): if not silence: sys.stderr.write("chroms or lengths not given, reading from genomes info file.\n") info = np.genfromtxt(datafile, dtype=[("chroms", CHROMS_DTYPE), ("lengths", LENGTHS_DTYPE)]) else: assembly = h5py.File(assembly, 'r') if isinstance(assembly, Genome): genome = assembly assembly = genome.assembly if chroms is None: chroms = genome.chroms if origins is None: origins = genome.origins if lengths is None: lengths = genome.lengths if isinstance(assembly, h5py.File): chroms = np.array(assembly["genome"]["chroms"][:], CHROMS_DTYPE) origins = assembly["genome"]["origins"] lengths = assembly["genome"]["lengths"] assembly = unicode(assembly["genome"]["assembly"][()]) if (chroms is None) or (lengths is None): chroms = info["chroms"].astype(CHROMS_DTYPE) lengths = info["lengths"].astype(LENGTHS_DTYPE) origins = np.zeros(len(lengths), dtype=ORIGINS_DTYPE) else: if origins is None: origins = np.zeros(len(lengths), dtype=ORIGINS_DTYPE) if len(chroms) != len(lengths) or len(chroms) != len(origins): raise RuntimeError("Dimension of chroms and lengths do not match.") chroms = np.array(chroms, dtype=CHROMS_DTYPE) lengths = np.array(lengths, dtype=LENGTHS_DTYPE) origins = np.array(origins, dtype=ORIGINS_DTYPE) choices = np.zeros(len(chroms), dtype=bool) if usechr is None: # if not specified, use all of them usechr = np.unique(chroms) for chrnum in usechr: if chrnum == '#': choices = np.logical_or([re.search(r"chr[\d]+$",c) != None for c in chroms], choices) else: # if specified with full name, remove chr chrnum = chrnum.replace('chr', '') choices = np.logical_or(chroms == ("chr%s" % chrnum), choices) self.chroms = chroms[choices] # convert to unicode for python2/3 compatibility self.origins = origins[choices] self.lengths = lengths[choices] self.assembly = unicode(assembly) # - def __eq__(self, other): try: return ( np.all(self.chroms == other.chroms) and np.all(self.origins == other.origins) and np.all(self.lengths == other.lengths) and np.all(self.assembly == other.assembly) ) except: return False def bininfo(self, resolution): """ Bin the genome by resolution Parameters ---------- resolution : int resolution of the matrix Return ------ utils.Index instance """ binSize = [int(math.ceil(float(x) / resolution)) for x in self.lengths] chromList = [] binLabel = [] for i in range(len(self.chroms)): chromList += [i for j in range(binSize[i])] binLabel += [j + int(self.origins[i] / resolution) for j in range(binSize[i])] startList = [binLabel[j] * resolution for j in range(sum(binSize))] endList = [binLabel[j] * resolution + resolution for j in range(sum(binSize))] binInfo = Index(chromList, startList, endList, chrom_sizes=binSize, genome=self) return binInfo def getchrnum(self, chrom): findidx = np.flatnonzero(self.chroms == unicode(chrom)) if len(findidx) == 0: return None else: return findidx[0] def getchrom(self, chromNum): assert isinstance(chromNum, (int, np.int32, np.int64)) return self.chroms[chromNum] def __getitem__(self, key): if isinstance(key, (int, np.int32, np.int64)): return self.getchrom(key) def __len__(self): return len(self.chroms) def __repr__(self): represent = "Genome Assembly: " + self.assembly + '\n' for i in range(len(self.chroms)): represent += (self.chroms[i].astype(str) + '\t' + str(self.origins[i]) + '-' + str( self.origins[i] + self.lengths[i]) + '\n') 
return represent def save(self, h5f, compression="gzip", compression_opts=6): """ Save genome information into a hd5f file handle. The information will be saved as a group of datasets: genome/ |- assembly |- chroms |- origins |- lengths Parameters ---------- h5f : h5py.File object compression : string "gzip" as default compression_opts : int compression level, higher the better """ assert isinstance(h5f, h5py.File) if 'genome' in h5f: ggrp = h5f["genome"] else: ggrp = h5f.create_group("genome") if 'assembly' in ggrp: ggrp['assembly'][...] = self.assembly else: ggrp.create_dataset("assembly", data=self.assembly) if 'chroms' in ggrp: ggrp['chroms'][...] = np.array(self.chroms, dtype='S10') else: ggrp.create_dataset("chroms", data=np.array(self.chroms, dtype='S10'), # hdf5 does not like unicode compression=compression, compression_opts=compression_opts, ) if 'origins' in ggrp: ggrp['origins'][...] = self.origins else: ggrp.create_dataset("origins", data=self.origins, compression=compression, compression_opts=compression_opts) if 'lengths' in ggrp: ggrp['lengths'][...] = self.lengths else: ggrp.create_dataset("lengths", data=self.lengths, compression=compression, compression_opts=compression_opts) # -------------------- class Index(object): """ Matrix/System indexes. Maps matrix bins or model beads to genomic regions. Parameters ---------- chrom : list[int32] or np.ndarray[int] numeric chromosome id (starting from 0) for each bin/bead. Es.: 0 -> chr1, 1 -> chr2, ..., 22 -> chrX start : list[int32] or np.ndarray[int] genomic starting positions of each bin (in bp, with respect to the chromosome start) end : list[int32] or np.ndarray[int] genomic ending positions of each bin (in bp, with respect to the chromosome start) label : list[string10] or np.ndarray[str] label for each bin (usually, 'CEN', 'gap', 'domain', although it can be any string of less than 10 characters) copy : list[int32] or np.ndarray[int], optional In systems of beads, there may be multiple indistinguishable copies of the same chromosome in the system. The copy vector specifies which copy of the chromosome each bead maps to. If not specified, is computed assuming non-contiguous groups of beads with the same `chrom` value belong to different copies. Each bead mapping to the same (chrom, start, end) tuple should, in general, have a different copy value. chrom_sizes : list[int32] or np.ndarray[int], optional number of bins/beads in each chromosome. It is useful to specify it if two copies of the same chromosome appear as contiguous in the index. If not specified, it is automatically computed assuming non-contiguous groups of beads with the same `chrom` value belong to different copies. genome: alabtools.utils.Genome or str, optional genome info for the index. usecols: list of ints if reading from a text file, use only a subset of columns ignore_headers: boolean if True and reading from a text file, ignore any column name specification. Defaults to False. 
""" def __init__(self, chrom=[], start=[], end=[], label=[], copy=[], chrom_sizes=[], genome=None, usecols=None, ignore_headers=False, **kwargs): self.custom_tracks = [] self.chrom_sizes = chrom_sizes self.genome = genome self.chromstr = None if isinstance(chrom, string_types): try: # tries to load a hdf5 file with h5py.File(chrom, 'r') as f: self.load_h5f(f) except IOError: # if it is not a hdf5 file, try to read as text BED colnames, n_header = self._look_for_bed_header(chrom) if ignore_headers: colnames = None data = np.genfromtxt(chrom, usecols=usecols, dtype=None, encoding=None, skip_header=n_header) n_fields = len(data.dtype.names) if n_fields < 3: raise ValueError('Provided text file does not seem to have at least 3 columns (chrom, start, end).') if colnames: assert len(colnames) == n_fields if usecols: colnames = np.array(colnames)[usecols].tolist() assert (colnames[:3] == ['chrom', 'start', 'end']) else: colnames = ['chrom', 'start', 'end'] + ['track%d' % i for i in range(n_fields - 3)] data.dtype.names = colnames # transform chrom names to integers ids self.chromstr = data['chrom'] if self.genome is None: cmap = {s: i for i, s in enumerate(natural_sort(np.unique(data['chrom'])))} self.chrom = [cmap[c] for c in data['chrom']] else: if not isinstance(self.genome, Genome): self.genome = Genome(self.genome) self.chrom = np.array([self.genome.getchrnum(c) for c in data['chrom']]) # set the variables for further processing self.start = data['start'] self.end = data['end'] if 'label' in data.dtype.names: self.label = data['label'] else: self.label = [] if 'copy' in data.dtype.names: self.copy = data['copy'] else: self.copy = [] # add custom fields as keyword arguments, to be added custom_field_names = set(data.dtype.names) - {'chrom', 'start', 'end', 'copy', 'label'} custom_fields = {n: data[n] for n in custom_field_names} kwargs.update(custom_fields) elif isinstance(chrom, h5py.File): self.load_h5f(chrom) elif isinstance(chrom, Index): index = chrom self.chrom = index.chrom self.start = index.start self.end = index.end self.copy = index.copy self.genome = index.genome else: self.chrom = chrom self.start = start self.end = end self.label = label self.copy = copy # fix datatypes and eventual problems if not (len(self.chrom) == len(self.start) == len(self.end)): raise RuntimeError("Dimensions do not match.") if len(self.chrom): try: int(self.chrom[0]) except: if isinstance(self.chrom[0], string_types): self.chromstr = self.chrom if self.genome is not None: self.chrom = [self.genome.getchrnum(x) for x in self.chromstr] else: cmap = {s: i for i, s in enumerate(natural_sort(np.unique(self.chromstr)))} self.chrom = [cmap[c] for c in self.chromstr] else: raise RuntimeError("chrom should be a list of integers or strings.") if len(self.start) and not isinstance(self.start[0], (int, np.int32, np.int64, np.uint32, np.uint64)): raise RuntimeError("start should be a list of integers.") if len(self.end) and not isinstance(self.end[0], (int, np.int32, np.int64, np.uint32, np.uint64)): raise RuntimeError("end should be list of integers.") self.chrom = np.array(self.chrom, dtype=CHROM_DTYPE) self.start = np.array(self.start, dtype=START_DTYPE) self.end = np.array(self.end, dtype=END_DTYPE) if len(self.copy) != len(self.chrom): self.copy = np.zeros(len(self.chrom), dtype=COPY_DTYPE) self._compute_copy_vec() else: self.copy = np.array(self.copy, dtype=COPY_DTYPE) if len(self.chrom_sizes) == 0 and len(self.chrom) != 0: # chrom sizes have not been computed yet self.chrom_sizes = [ len(list(g)) for _, g in 
itertools.groupby( zip(self.chrom, self.copy) ) ] self.chrom_sizes = np.array(self.chrom_sizes, dtype=CHROM_SIZES_DTYPE) self.num_chrom = len(self.chrom_sizes) if len(self.label) != len(self.chrom): self.label = np.array(["-"] * len(self.chrom), dtype=LABEL_DTYPE) else: self.label = np.array(self.label, dtype=LABEL_DTYPE) self.offset = np.array([sum(self.chrom_sizes[:i]) for i in range(len(self.chrom_sizes) + 1)]) if not hasattr(self, 'copy_index'): self._compute_copy_index() self._compute_ref_vec() self.loctree = None # add additional attributes for k, v in kwargs.items(): self.add_custom_track(k, v) if self.chromstr is None: if self.genome is not None: self.chromstr = self.genome.chroms[self.chrom] else: self.chromstr = np.array(['chr%d' % (i + 1) for i in self.chrom]) self._map_chrom_id = {} self._map_id_chrom = {} for i, s in zip(self.chrom, self.chromstr): self._map_chrom_id[s] = i self._map_id_chrom[i] = s # - @staticmethod def _look_for_bed_header(file_descriptor): ''' Bed is a somewhat inconsistent format. Apparently can have a header or not, and it starts with `track` or `browser`. But I've seen people using the first line as column names, or using comments (#) to put column names. Here I try to find out if there is a header with column names or not. Far from failproof, but should cover the cases I found. ''' colnames = None n_head = 0 if isinstance(file_descriptor, string_types): file_descriptor = open(file_descriptor, 'r') for line in file_descriptor: line = line.strip() if line: if line.startswith('#'): line = line.replace('#', '') colnames = line.split() else: # if has a bed header, I don't know exactly how to parse it, # so check only the other cases if not (line.startswith('track') or line.startswith('browser')): ss = StringIO(line) x = np.genfromtxt(ss, dtype=None, encoding=None) if x.dtype.kind == 'U': # only unicode string, no integers, maybe a header colnames = x.tolist() else: # in this case, we should be reading the chrom start end version if colnames: # if we found column names, unset them if they are not consistent if len(colnames) != len(line.split()): colnames = None # also, if we have column names, they should start with chrom, start, end elif colnames[:3] != ['chrom', 'start', 'end']: colnames = None break # increase the number of header lines n_head += 1 return colnames, n_head def __eq__(self, other): ''' equality check ''' return ( np.all(self.chrom == other.chrom) and np.all(self.start == other.start) and np.all(self.end == other.end) and np.all(self.label == other.label) and np.all(self.copy == other.copy) ) def __add__(self, other): ''' concatenate indices ''' return Index( np.concatenate([self.chrom, other.chrom]), np.concatenate([self.start, other.start]), np.concatenate([self.end, other.end]), np.concatenate([self.label, other.label]), genome=self.genome ) def resolution(self): ''' Checks if all the region sizes are equal (except for the last region of each chromosome). If they are, returns the only size as a resolution. If a resolution cannot be determined returns None. 
:return: int or None ''' sizes = self.end - self.start last_beads = np.array([self.offset[i] - 1 for i in range(1, len(self.offset))]) mask = np.ones(len(self), np.bool) mask[last_beads] = False us = np.unique(sizes[mask]) if len(us) == 1: return us[0] return None def chrom_to_id(self, c): return self._map_chrom_id[c] def id_to_chrom(self, c): return self._map_id_chrom[c] def get_chrom_mask(self, c, copy=None): ''' Return the mask relative to a chromosome Parameters ---------- c : str or int the chromosome. To access by string, the Index needs to have the genome member variable set. copy : int or list, optional If specified, select only the specified copy/copies. Returns ------- idx : np.ndarray ''' if isinstance(c, string_types): if self.genome is None: c = int(c.replace('chr', '')) - 1 else: c = self.genome.getchrnum(c) idx = self.chrom == c if copy is not None: idx = np.logical_and(idx, np.isin(self.copy, copy)) return idx def get_chrom_pos(self, c, copy=None): return np.flatnonzero(self.get_chrom_mask(c, copy)) def get_chrom_index(self, c, copy=None): ii = self.get_chrom_pos(c, copy) return Index( self.chrom[ii], self.start[ii], self.end[ii], self.label[ii], self.copy[ii], genome=self.genome ) def get_chrom_copies(self, c): return np.unique(self.copy[self.get_chrom_mask(c)]) def get_chromosomes(self): return pd.unique(self.chrom) def get_chrom_names(self): ''' Returns the unique names of chromosomes in this index ''' return pd.unique(self.chromstr) def __getitem__(self, key): return np.rec.fromarrays((self.chrom[key], self.start[key], self.end[key], self.label[key]), dtype=[("chrom", CHROM_DTYPE), ("start", START_DTYPE), ("end", END_DTYPE), ("label", LABEL_DTYPE)]) def __len__(self): return len(self.chrom) def __repr__(self): return '<alabtools.Index: %d chroms, %d segments>' % ( self.num_chrom, len(self.chrom) ) def add_custom_track(self, k, v, force=False): a = np.array(v) assert (len(a) == len(self)) if k in self.custom_tracks: if not force: raise KeyError('track %s already present' % k) else: self.remove_custom_track(k) self.custom_tracks.append(k) self.__setattr__(k, a) def remove_custom_track(self, k): if (not hasattr(self, k)) or (k not in self.custom_tracks): raise KeyError('track %s not present' % k) self.custom_tracks.remove(k) t = getattr(self, k) delattr(self, k) return t def rename_custom_track(self, k, nk): t = self.remove_custom_track(k) self.add_custom_track(nk, t) def get_custom_track(self, k): return getattr(self, k) def get_sub_index(self, keys): dt = self[keys] custom_tracks = { k: self.__getattribute__(k)[keys] for k in self.custom_tracks } return Index( dt['chrom'], dt['start'], dt['end'], dt['label'], self.copy[keys], genome=self.genome, **custom_tracks ) def get_haploid(self): return self.get_sub_index(self.copy == 0) def _compute_copy_vec(self): ''' Tries to guess the copy vector. It assumes every copy of a chromosome to be sequential and sorted by start/end. When bins have the same chromosome id but are separated in sequence, or the latter starts before the end of the previous one, the two bins are assumed to refer to different copies. ''' if len(self.copy) == 0: return chrom_ids = set(self.chrom) copy_no = {c: -1 for c in chrom_ids} copy_no[self.chrom[0]] = 0 self.copy[0] = 0 for i in range(1, len(self.chrom)): cc = self.chrom[i] if self.chrom[i - 1] != cc or self.start[i] < self.end[i - 1]: copy_no[cc] += 1 self.copy[i] = copy_no[cc] def _compute_copy_index(self): ''' Returns a index of the copies of the same genomic region in form of a dictionary. 
The key of each unique region is the id of the first bin/bead mapping to it. The values are lists of all the bead id's (including the key) which map to that particular region. In the case of an haploid system, or a contact map, this dictionary is completely trivial, i.e. {i : i for i in range(len(index))}. ''' tmp_index = {} for i, v in enumerate(self): locus = (int(v.chrom), int(v.start), int(v.end)) if locus not in tmp_index: tmp_index[locus] = [i] else: tmp_index[locus] += [i] # use first index as a key self.copy_index = {ii[0]: ii for locus, ii in tmp_index.items()} def _compute_ref_vec(self): ''' Sets a vector of references, the inverse of the copy index ''' self.refs = [None] * len(self) for i, ii in self.copy_index.items(): for j in ii: self.refs[j] = i def load_h5f(self, h5f): self.chrom = h5f["index"]["chrom"][()] self.start = h5f["index"]["start"][()] self.end = h5f["index"]["end"][()] self.copy = h5f["index"]["copy"][()] self.label = np.array(h5f["index"]["label"][()], LABEL_DTYPE) self.chrom_sizes = h5f["index"]["chrom_sizes"][()] if 'genome' in h5f: try: self.genome = Genome(h5f) except: pass try: # try to load the copy index tmp = json.loads(h5f["index"]["copy_index"][()]) # it may happen that in json dump/loading keys are considered # as strings. self.copy_index = {int(k): v for k, v in tmp.items()} except (KeyError, ValueError): pass try: self.chromstr = np.array(h5f["index"]["chromstr"][()], CHROMS_DTYPE) except: pass # tries to load additional data tracks if 'custom_tracks' in h5f["index"]: self.custom_tracks = json.loads(h5f["index"]["custom_tracks"][()]) for k in self.custom_tracks: v = h5f["index"][k][()] if not isinstance(v, np.ndarray): v = np.array(json.loads(v)) setattr(self, k, v) def load_txt(self, f): pass def save(self, h5f, compression="gzip", compression_opts=6): """ Save index information into a hd5f file handle. The information will be saved as a group of datasets: index/ |- chrom |- start |- end |- label |- copy |- chrom_sizes Parameters ---------- h5f : h5py.File object compression : string 'gzip' as default compression_opts : int compression level, higher the better """ assert isinstance(h5f, h5py.File) if 'index' in h5f: igrp = h5f['index'] else: igrp = h5f.create_group("index") if 'chrom' in igrp: igrp['chrom'][...] = self.chrom else: igrp.create_dataset("chrom", data=self.chrom, compression=compression, compression_opts=compression_opts) if 'start' in igrp: igrp['start'][...] = self.start else: igrp.create_dataset("start", data=self.start, compression=compression, compression_opts=compression_opts) if 'end' in igrp: igrp['end'][...] = self.end else: igrp.create_dataset("end", data=self.end, compression=compression, compression_opts=compression_opts) if 'label' in igrp: igrp['label'][...] = np.array(self.label, dtype=np.dtype('S10')) else: igrp.create_dataset("label", data=np.array(self.label, dtype=np.dtype('S10')), compression=compression, compression_opts=compression_opts) if 'copy' in igrp: igrp['copy'][...] = self.copy else: igrp.create_dataset("copy", data=self.copy, compression=compression, compression_opts=compression_opts) if 'chrom_sizes' in igrp: igrp['chrom_sizes'][...] = self.chrom_sizes else: igrp.create_dataset("chrom_sizes", data=self.chrom_sizes, compression=compression, compression_opts=compression_opts) if 'copy_index' in igrp: igrp['copy_index'][...] 
= json.dumps(self.copy_index) else: # scalar datasets don't support compression igrp.create_dataset("copy_index", data=json.dumps(self.copy_index)) if 'chromstr' in igrp: igrp['chromstr'][...] = np.array(self.chromstr, dtype=np.dtype('S10')) else: # scalar datasets don't support compression igrp.create_dataset("chromstr", data=np.array(self.chromstr, dtype=np.dtype('S10'))) for k in self.custom_tracks: if k in igrp: try: igrp[k][...] = self.__getattribute__(k) except: igrp[k][...] = json.dumps(self.__getattribute__(k).tolist()) else: # scalar datasets don't support compression try: igrp.create_dataset(k, data=self.__getattribute__(k)) except: igrp.create_dataset(k, data=json.dumps(self.__getattribute__(k).tolist())) if 'custom_tracks' in igrp: igrp['custom_tracks'][...] = json.dumps(self.custom_tracks) else: # scalar datasets don't support compression igrp.create_dataset("custom_tracks", data=json.dumps(self.custom_tracks)) if self.genome and "genome" not in h5f: self.genome.save(h5f) h5f.flush() def dump_csv(self, file=sys.stdout, include=None, exclude=None, header=True, header_style='comment', sep=";", cut_chrom_ends=False): if isinstance(file, str): file = open(file, 'w') if include is None: include = ['chrom', 'start', 'end', 'label', 'copy'] + self.custom_tracks else: include = ['chrom', 'start', 'end'] + include if exclude is not None: for e in exclude: if e in include: include.remove(e) if header: if header_style == 'comment': file.write('# ') file.write(sep.join(include) + '\n') for i in range(len(self)): if cut_chrom_ends: save_end = self.end[i] if self.end[i] > self.genome.lengths[self.chrom[i]]: self.end[i] = self.genome.lengths[self.chrom[i]] file.write( sep.join([ str(self.__getattribute__(col)[i]) if col != 'chrom' else self.chromstr[i] for col in include ]) + '\n' ) if cut_chrom_ends: self.end[i] = save_end def dump_bed(self, file=sys.stdout, include=None, exclude=None, header=True, cut_chrom_ends=False): self.dump_csv(file, include, exclude, header, sep='\t', cut_chrom_ends=cut_chrom_ends) def loc(self, chrom, start, end=None, copy=None): ''' Get the indexes corresponding to the specified genomic location or segment. Parameters ---------- chrom (str or int) : chromosome name or 0 based number start (int) : position or starting position of a region (basepairs) end (int, optional) : ending position of the region copy (int or list, optional) : consider only the specified copies of the chromosome Returns ------- np.ndarray[int] : an array containing the positions of index's regions which overlap with the input region. 
''' if self.loctree is None: self.loctree = LocStruct(self) if end is None: buckets = self.loctree[chrom][start // BUCKET_SIZE: start // BUCKET_SIZE + 1] else: buckets = self.loctree[chrom][start // BUCKET_SIZE: (end - 1) // BUCKET_SIZE + 1] locs = [np.array([], dtype=int)] for bucket in buckets: locs.append(bucket.get_intersections(start, end)) locs = np.concatenate(locs) if copy is not None: if not isinstance(copy, collections.Iterable): copy = [copy] locs = [i for i in locs if self.copy[i] in copy] return np.sort(np.unique(locs)) # -------------------- def loadstream(filename): """ Convert a file location, return a file handle zipped file are automaticaly unzipped using stream """ if not os.path.isfile(filename): raise IOError("File %s doesn't exist!\n" % (filename)) if os.path.splitext(filename)[1] == '.gz': p = subprocess.Popen(["zcat", filename], stdout=subprocess.PIPE) f = StringIO(p.communicate()[0]) elif os.path.splitext(filename)[1] == '.bz2': p = subprocess.Popen(["bzip2 -d", filename], stdout=subprocess.PIPE) f = StringIO(p.communicate()[0]) else: f = open(filename, 'r') return f def make_diploid(index): didx = {} for k in ['chrom', 'start', 'end', 'label']: didx[k] = np.concatenate([index.__dict__[k], index.__dict__[k]]) didx['copy'] = np.concatenate([index.__dict__['copy'], index.__dict__['copy'] + 1]) return Index(didx['chrom'], didx['start'], didx['end'], label=didx['label'], copy=didx['copy']) def make_multiploid(index, chroms, copies): ''' Returns a multiploid index mapping based on the input index. The index is ordered by the copy id and then by the input chrom order. For example, using chroms=[0, 1, 2] and copies=[3, 2, 1], the output index chromosomes will be in this order: [0, 1, 2, 0, 1, 0]. Parameters ---------- index (alabtools.utils.Index): Haploid input index chroms (array[int] like): the chromosomes ids to be included in the output index copy_number (array[int] like): the number of copies for each of the chromosomes in `chrom` Returns ------- alabtools.utils.Index: a index with `copies[i]` copies of the `chroms[i]` chromosome, for each element in `chroms`. Examples -------- # human genome assembly g = Genome('hg19', usechr=['#', 'X', 'Y']) # get the index at 1MB resolution idx = g.bininfo(1000000) # obtain the index for a 1MB resolution male diploid cell # i.e. 
2 copies for chromosomes 1-22 and 1 each for chromosomes X and Y num_copies = [2]*22 + [1, 1] idx = make_multiploid(idx, range(24), num_copies) ''' nchrom = [] nstart = [] nend = [] ncopy = [] nlabel = [] csizes = [] add_tracks = {k: [] for k in index.custom_tracks} for z in range(max(copies)): for cid, cnum in zip(chroms, copies): if z < cnum: idxs = np.where(index.chrom == cid)[0] nchrom.append(index.chrom[idxs]) nstart.append(index.start[idxs]) nend.append(index.end[idxs]) nlabel.append(index.label[idxs]) ncopy.append(np.array([z] * len(idxs))) csizes.append(len(idxs)) for k in index.custom_tracks: add_tracks[k].append(index.get_custom_track(k)[idxs]) nchrom = np.concatenate(nchrom) nstart = np.concatenate(nstart) nend = np.concatenate(nend) ncopy = np.concatenate(ncopy) nlabel = np.concatenate(nlabel) for k in index.custom_tracks: add_tracks[k] = np.concatenate(add_tracks[k]) return Index(nchrom, nstart, nend, copy=ncopy, label=nlabel, chrom_sizes=csizes, **add_tracks) def natural_sort(l): convert = lambda text: int(text) if text.isdigit() else text.lower() alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] return sorted(l, key=alphanum_key) def get_index_from_bed( file, genome=None, usecols=None, ): return Index(file, genome, usecols) _ftpi = 4. / 3. * np.pi DEFAULT_NUCLEAR_VOLUME = _ftpi * (5000 ** 3) def compute_radii(index, occupancy=0.2, volume=DEFAULT_NUCLEAR_VOLUME): sizes = [b.end - b.start for b in index] totsize = sum(sizes) prefactor = volume * occupancy / (_ftpi * totsize) rr = [(prefactor * sz) ** (1. / 3.) for sz in sizes] return np.array(rr, dtype=RADII_DTYPE) # if we are dealing with h5py datasets, preloads data for performance class H5Batcher(): def __init__(self, ds, bsize): self.ds = ds self.n = ds.shape[0] self.b = 0 self.bsize = bsize self.batch = ds[0:min(bsize, self.n)] def __getitem__(self, key): if key > self.n: raise KeyError() z = key // self.bsize if z != self.b: self.b = z self.batch = self.ds[ self.bsize * z: min(self.n, self.bsize * (z + 1)) ][()] return self.batch[key % self.bsize] def __len__(self): return self.ds.__len__() def remap(s0, s1): ''' Creates forward and backward maps between two segmentations, assuming no gaps in the mapping: each segment will have at least one corresponding partner in the other segmentation. Assumes the two segmentations have the same starting and ending points. Ugly stuff can happen if this is not the case. Parameters ---------- s0, s1 : list of ints the boundaries of two segmentations, including start and end positions. A segmentation in N segments will have N+1 boundary positions. Returns ------- dmap : list of lists of ints the map from s0 to s1. Each element dmap[ i ], 0 <= i < N holds the indexes of the segments on s1 corresponding to the i-th segment on s0. Each segment in s0 is guarantee to have at least one corresponding segment in s1. imap : list of lists of ints the inverse mapping from s1 to s0. ''' dmap = [list() for _ in range(len(s0) - 1)] imap = [list() for _ in range(len(s1) - 1)] # Define an overlap score to generalize the choice of corresponding # elements when boundaries do not match. 
Simply the size of the # overlapping section def overlap(i, j): b = max(s0[i], s1[j]) e = min(s0[i + 1], s1[j + 1]) return max(0, e - b) N = len(s0) - 1 M = len(s1) - 1 # the first elements are necessarily mapped one to the other i = 0 j = 0 while True: dmap[i].append(j) imap[j].append(i) if i == N - 1 and j == M - 1: break # if we get to the end of a segmentation, assign all the rest of # the other one to this last element if j == M - 1: i += 1 elif i == N - 1: j += 1 else: # decide on which sequence(s) to advance based on overlap v = [overlap(i + 1, j), overlap(i, j + 1), overlap(i + 1, j + 1)] k = v.index(max(v)) if k == 0 or k == 2: i += 1 if k == 1 or k == 2: j += 1 return dmap, imap def get_index_mappings(idx0, idx1): # make sure that the two subdivisions map the same chromosomes assert idx0.num_chrom == idx1.num_chrom n_chrom = idx0.num_chrom # get the starting index for each chromosome cc0 = idx0.offset cc1 = idx1.offset # get the mapping for each chromosome cmap, fwmap, bwmap = [], [], [] for i in range(n_chrom): # create the arrays of boundaries v0 = np.concatenate([idx0.start[cc0[i]:cc0[i + 1]], [idx0.end[cc0[i + 1] - 1]]]) v1 = np.concatenate([idx1.start[cc1[i]:cc1[i + 1]], [idx1.end[cc1[i + 1] - 1]]]) # get the maps chromosome by chromosome dm, im = remap(v0, v1) cmap.append([dm, im]) # the full mapping needs to add the chromosome offset # to every element (note, elements in direct map refer to # the second segmentation, and viceversa) fwmap += [ [ x + idx1.offset[i] for x in row ] for row in dm ] bwmap += [ [ x + idx0.offset[i] for x in row ] for row in im ] return cmap, fwmap, bwmap def region_intersect(b, e, x, y): a = x < e b = y > b return (a and b) def region_intersect_one(b, e, x, y): return (x >= b) and (x < e) class Node: """ Class Node """ def __init__(self, value=None): self.left = None self.data = value self.right = None class BucketTree: def __init__(self, ids, starts, ends): self.root = Node() # sort by start, end, id self.fill(self.root, ids, starts, ends) @staticmethod def fill(node, ids, starts, ends): n = len(starts) if n == 0: return elif n == 1: node.data = (n, ids[0], starts[0], ends[0]) else: i = n // 2 node.data = (n, None, starts[0], ends[-1]) node.left = Node() node.right = Node() BucketTree.fill(node.left, ids[:i], starts[:i], ends[:i]) BucketTree.fill(node.right, ids[i:], starts[i:], ends[i:]) @staticmethod def _traverse_tree(node, ids, x, y, f=region_intersect): if node.data is None: return n, i, s, e = node.data if f(s, e, x, y): if n == 1: ids.append(i) else: BucketTree._traverse_tree(node.left, ids, x, y, f) BucketTree._traverse_tree(node.right, ids, x, y, f) def get_intersections(self, x, y=None): ids = [] if y is None: self._traverse_tree(self.root, ids, x, y, region_intersect_one) else: self._traverse_tree(self.root, ids, x, y) return ids class BucketLinear: def __init__(self, ids, starts, ends): self.starts = np.array(starts, dtype=int) self.ends = np.array(ends, dtype=int) self.ids = np.array(ids, dtype=int) def get_intersections(self, x, y=None): if y is None: ii = (self.starts <= x) & (self.ends > x) else: ii = (self.ends > x) & (self.starts < y) return self.ids[ii] class LocStruct: def __init__(self, index): self.chroms = {} chromids = index.get_chromosomes() chroms = index.get_chrom_names() for cid, c in zip(chromids, chroms): ii = index.get_chrom_pos(cid) ind = np.lexsort((index.start[ii], index.end[ii], ii)) starts, ends, ids = index.start[ii][ind], index.end[ii][ind], ii[ind] nbuckets = ends[-1] // BUCKET_SIZE + 1 bS = [list() for _ 
in range(nbuckets)] bE = [list() for _ in range(nbuckets)] bI = [list() for _ in range(nbuckets)] for s, e, i in zip(starts, ends, ids): i0, i1 = s // BUCKET_SIZE, (e - 1) // BUCKET_SIZE for k in range(i0, i1 + 1): bS[k].append(s) bE[k].append(e) bI[k].append(i) self.chroms[c] = list() for i in range(nbuckets): self.chroms[c].append(BucketLinear(bI[i], bS[i], bE[i])) self.chroms[cid] = self.chroms[c] def __getitem__(self, c): return self.chroms[c] def underline(*args, **kwargs): ''' Underlines a string.Takes a variable number of unnamed arguments, and the final string is assembled like in the print function. The width of the under-line matches the longest line in the output (or the terminal width, if `terminal` is set to True) Keyword arguments ----------------- char : a character or a string to use to underline terminal : if set to True, limits the line width to the terminal ''' import re char = kwargs.pop('char', '-') terminal = kwargs.pop('terminal', False) s = ' '.join([str(a) if hasattr(a, '__str__') else repr(a) for a in args]) ss = re.split('[\\r\\n]+', s) l = max([len(x) for x in ss]) if terminal: import shutil c, r = shutil.get_terminal_size() l = min(l, c) u = (char * (l // len(char) + 1))[:l] return s + '\n' + u def block_transpose(x1, x2, max_items=int(1e8)): ''' Transposes a matrix in blocks smaller than max_items. Parameters ---------- x1: ndarray-like input matrix x2: indarray-like output matrix max_items: int maximum number of matrix elements to be transposed at once ''' n = x1.shape[0] k = max(max_items // n, 1) for i in range(0, n, k): s = min(k, n - i) block = x1[i:i + s].swapaxes(0, 1) # get a subset and transpose in memory x2[:, i:i + s] = block def isSymmetric(x): return np.all(x.T == x) # See details in Imakaev et al. (2012) def PCA(A, numPCs=6, verbose=False): """performs PCA analysis, and returns 6 best principal components result[0] is the first PC, etc""" # A = np.array(A, float) if np.sum(np.sum(A, axis=0) == 0) > 0: warnings.warn("Columns with zero sum detected. Use zeroPCA instead") M = (A - np.mean(A.T, axis=1)).T covM = np.dot(M, M.T) [latent, coeff] = scipy.sparse.linalg.eigsh(covM, numPCs) if verbose: print("Eigenvalues are:", latent) return (np.transpose(coeff[:, ::-1]), latent[::-1]) def EIG(A, numPCs=3): """Performs mean-centered engenvector expansion result[0] is the first EV, etc.; by default returns 3 EV """ # A = np.array(A, float) if np.sum(np.sum(A, axis=0) == 0) > 0: warnings.warn("Columns with zero sum detected. 
Use zeroEIG instead") M = (A - np.mean(A)) # subtract the mean (along columns) if isSymmetric(A): [latent, coeff] = scipy.sparse.linalg.eigsh(M, numPCs) else: [latent, coeff] = scipy.sparse.linalg.eigs(M, numPCs) alatent = np.argsort(np.abs(latent)) print("eigenvalues are:", latent[alatent]) coeff = coeff[:, alatent] return (np.transpose(coeff[:, ::-1]), latent[alatent][::-1]) def zeroPCA(data, numPCs=3, verbose=False): """ PCA which takes into account bins with zero counts """ nonzeroMask = np.sum(data, axis=0) > 0 data = data[nonzeroMask] data = data[:, nonzeroMask] PCs = PCA(data, numPCs, verbose) PCNew = [np.zeros(len(nonzeroMask), dtype=float) for _ in PCs[0]] for i in range(len(PCs[0])): PCNew[i][nonzeroMask] = PCs[0][i] return PCNew, PCs[1] def zeroEIG(data, numPCs=3): """ Eigenvector expansion which takes into account bins with zero counts """ nonzeroMask = np.sum(data, axis=0) > 0 data = data[nonzeroMask] data = data[:, nonzeroMask] PCs = EIG(data, numPCs) PCNew = [np.zeros(len(nonzeroMask), dtype=float) for _ in PCs[0]] for i in range(len(PCs[0])): PCNew[i][nonzeroMask] = PCs[0][i] return PCNew, PCs[1] def isiterable(a): try: for _ in a: break except: return False return True def spline_4p(t, p): """ Catmull-Rom """ # wikipedia Catmull-Rom -> Cubic_Hermite_spline # 0 -> p0, 1 -> p1, 1/2 -> (- p_1 + 9 p0 + 9 p1 - p2) / 16 # assert 0 <= t <= 1 return ( t * ((2 - t) * t - 1) * p[0] + (t * t * (3 * t - 5) + 2) * p[1] + t * ((4 - 3 * t) * t + 1) * p[2] + (t - 1) * t * t * p[3] ) / 2 class CatmullRomSpline: """ Computes a Catmull-Rom spline curve on n-dimensional points. Optionally, one can specify the value of the curve parameter of each point, usually (but not necessarily) in the range [0, 1]. This is useful for curves which are not equally sampled, for example. When a value of curve parameters outside the extremes of chain_positions (0 and 1 by default) is provided, a linear interpolation using the two last points is used. Parameters ---------- points: ndarray list of points defining the curve chain_positions: array, optional list of curve parameters for each point. If not specified, points are assumed to be equally spaced Notes ----- Since spacing between points can be uneven, a search must be done for each call, hurting performance. 
Example ------- points = np.random.random((10, 3)) cs = CatmullRomSpline(points) resampled_points = np.array([ cs(x) for x in np.linspace(0, 1, 100)]) """ def __init__(self, points, chain_positions=None): points = np.array(points) # create extension points at beginning and end vb = (points[0] - points[1]) * 0.1 ve = (points[-1] - points[-2]) * 0.1 if chain_positions is None: chain_positions = np.linspace(0, 1, len(points)) points = np.concatenate([ [points[0] + vb], points, [points[-1] + ve] ]) self.points = points self.pos = chain_positions self.steps = np.diff(self.pos, 1) # try to make average case scenario search faster by creating buckets mean_size = self.steps.mean() n_buckets = int((self.pos[-1] - self.pos[0]) / mean_size) + 1 self._buckets = [list() for _ in range(n_buckets)] for z in range(1, len(self.pos)): start = int((self.pos[z - 1] - self.pos[0]) / mean_size) end = int((self.pos[z] - self.pos[0]) / mean_size) for i in range(start, end + 1): self._buckets[i].append(z - 1) self._bucket_step = mean_size def __call__(self, t): if t < self.pos[0]: q = (self.pos[0] - t) / self.steps[0] return self.points[1] + (self.points[1] - self.points[2]) * q elif t >= self.pos[-1]: q = (t - self.pos[-1]) / self.steps[-1] return self.points[-2] + (self.points[-2] - self.points[-3]) * q else: k = int((t - self.pos[0]) / self._bucket_step) a = self.pos[self._buckets[k]] ib = np.searchsorted(a, t, side='right') - 1 # left interval boundary i = self._buckets[k][ib] q = (t - self.pos[i]) / self.steps[i] return spline_4p(q, self.points[i:i + 4])
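The classes above are meant to be composed: Genome holds the assembly bookkeeping, Genome.bininfo produces a haploid Index at a given resolution, make_multiploid expands it to several copies per chromosome, and Index.loc answers positional queries on the result. The following sketch chains those calls together; the assembly name, 1 Mb resolution, and query region are arbitrary illustrative choices, and it assumes the module is importable as alabtools.utils (the path shown above) with the packaged hg19 info file available.

# Illustrative call sequence only (assumes the bundled hg19 genome info file).
from alabtools.utils import Genome, make_multiploid

genome = Genome('hg19', usechr=['#', 'X'])      # autosomes plus chrX
haploid = genome.bininfo(1000000)               # 1 Mb bins, one copy per chromosome

# Two copies of every selected chromosome (a simple diploid female model).
n_chrom = len(genome.chroms)
diploid = make_multiploid(haploid, range(n_chrom), [2] * n_chrom)

# Positions of all regions overlapping chr1:10,000,000-12,000,000 on copy 0.
hits = diploid.loc('chr1', 10000000, 12000000, copy=0)
print(len(diploid), 'bins in the diploid index;', len(hits), 'overlap the query')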
gpl-3.0
vermouthmjl/scikit-learn
examples/applications/plot_out_of_core_classification.py
32
13829
""" ====================================================== Out-of-core classification of text documents ====================================================== This is an example showing how scikit-learn can be used for classification using an out-of-core approach: learning from data that doesn't fit into main memory. We make use of an online classifier, i.e., one that supports the partial_fit method, that will be fed with batches of examples. To guarantee that the features space remains the same over time we leverage a HashingVectorizer that will project each example into the same feature space. This is especially useful in the case of text classification where new features (words) may appear in each batch. The dataset used in this example is Reuters-21578 as provided by the UCI ML repository. It will be automatically downloaded and uncompressed on first run. The plot represents the learning curve of the classifier: the evolution of classification accuracy over the course of the mini-batches. Accuracy is measured on the first 1000 samples, held out as a validation set. To limit the memory consumption, we queue examples up to a fixed amount before feeding them to the learner. """ # Authors: Eustache Diemert <eustache@diemert.fr> # @FedericoV <https://github.com/FedericoV/> # License: BSD 3 clause from __future__ import print_function from glob import glob import itertools import os.path import re import tarfile import time import numpy as np import matplotlib.pyplot as plt from matplotlib import rcParams from sklearn.externals.six.moves import html_parser from sklearn.externals.six.moves import urllib from sklearn.datasets import get_data_home from sklearn.feature_extraction.text import HashingVectorizer from sklearn.linear_model import SGDClassifier from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.linear_model import Perceptron from sklearn.naive_bayes import MultinomialNB def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return '__file__' in globals() ############################################################################### # Reuters Dataset related routines ############################################################################### class ReutersParser(html_parser.HTMLParser): """Utility class to parse a SGML file and yield documents one at a time.""" def __init__(self, encoding='latin-1'): html_parser.HTMLParser.__init__(self) self._reset() self.encoding = encoding def handle_starttag(self, tag, attrs): method = 'start_' + tag getattr(self, method, lambda x: None)(attrs) def handle_endtag(self, tag): method = 'end_' + tag getattr(self, method, lambda: None)() def _reset(self): self.in_title = 0 self.in_body = 0 self.in_topics = 0 self.in_topic_d = 0 self.title = "" self.body = "" self.topics = [] self.topic_d = "" def parse(self, fd): self.docs = [] for chunk in fd: self.feed(chunk.decode(self.encoding)) for doc in self.docs: yield doc self.docs = [] self.close() def handle_data(self, data): if self.in_body: self.body += data elif self.in_title: self.title += data elif self.in_topic_d: self.topic_d += data def start_reuters(self, attributes): pass def end_reuters(self): self.body = re.sub(r'\s+', r' ', self.body) self.docs.append({'title': self.title, 'body': self.body, 'topics': self.topics}) self._reset() def start_title(self, attributes): self.in_title = 1 def end_title(self): self.in_title = 0 def start_body(self, attributes): self.in_body = 1 def end_body(self): self.in_body = 0 def start_topics(self, attributes): 
self.in_topics = 1 def end_topics(self): self.in_topics = 0 def start_d(self, attributes): self.in_topic_d = 1 def end_d(self): self.in_topic_d = 0 self.topics.append(self.topic_d) self.topic_d = "" def stream_reuters_documents(data_path=None): """Iterate over documents of the Reuters dataset. The Reuters archive will automatically be downloaded and uncompressed if the `data_path` directory does not exist. Documents are represented as dictionaries with 'body' (str), 'title' (str), 'topics' (list(str)) keys. """ DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/' 'reuters21578-mld/reuters21578.tar.gz') ARCHIVE_FILENAME = 'reuters21578.tar.gz' if data_path is None: data_path = os.path.join(get_data_home(), "reuters") if not os.path.exists(data_path): """Download the dataset.""" print("downloading dataset (once and for all) into %s" % data_path) os.mkdir(data_path) def progress(blocknum, bs, size): total_sz_mb = '%.2f MB' % (size / 1e6) current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6) if _not_in_sphinx(): print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb), end='') archive_path = os.path.join(data_path, ARCHIVE_FILENAME) urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress) if _not_in_sphinx(): print('\r', end='') print("untarring Reuters dataset...") tarfile.open(archive_path, 'r:gz').extractall(data_path) print("done.") parser = ReutersParser() for filename in glob(os.path.join(data_path, "*.sgm")): for doc in parser.parse(open(filename, 'rb')): yield doc ############################################################################### # Main ############################################################################### # Create the vectorizer and limit the number of features to a reasonable # maximum vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18, non_negative=True) # Iterator over parsed Reuters SGML files. data_stream = stream_reuters_documents() # We learn a binary classification between the "acq" class and all the others. # "acq" was chosen as it is more or less evenly distributed in the Reuters # files. For other datasets, one should take care of creating a test set with # a realistic portion of positive instances. all_classes = np.array([0, 1]) positive_class = 'acq' # Here are some classifiers that support the `partial_fit` method partial_fit_classifiers = { 'SGD': SGDClassifier(), 'Perceptron': Perceptron(), 'NB Multinomial': MultinomialNB(alpha=0.01), 'Passive-Aggressive': PassiveAggressiveClassifier(), } def get_minibatch(doc_iter, size, pos_class=positive_class): """Extract a minibatch of examples, return a tuple X_text, y. Note: size is before excluding invalid docs with no topics assigned. 
""" data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics']) for doc in itertools.islice(doc_iter, size) if doc['topics']] if not len(data): return np.asarray([], dtype=int), np.asarray([], dtype=int) X_text, y = zip(*data) return X_text, np.asarray(y, dtype=int) def iter_minibatches(doc_iter, minibatch_size): """Generator of minibatches.""" X_text, y = get_minibatch(doc_iter, minibatch_size) while len(X_text): yield X_text, y X_text, y = get_minibatch(doc_iter, minibatch_size) # test data statistics test_stats = {'n_test': 0, 'n_test_pos': 0} # First we hold out a number of examples to estimate accuracy n_test_documents = 1000 tick = time.time() X_test_text, y_test = get_minibatch(data_stream, 1000) parsing_time = time.time() - tick tick = time.time() X_test = vectorizer.transform(X_test_text) vectorizing_time = time.time() - tick test_stats['n_test'] += len(y_test) test_stats['n_test_pos'] += sum(y_test) print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test))) def progress(cls_name, stats): """Report progress information, return a string.""" duration = time.time() - stats['t0'] s = "%20s classifier : \t" % cls_name s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats s += "accuracy: %(accuracy).3f " % stats s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration) return s cls_stats = {} for cls_name in partial_fit_classifiers: stats = {'n_train': 0, 'n_train_pos': 0, 'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(), 'runtime_history': [(0, 0)], 'total_fit_time': 0.0} cls_stats[cls_name] = stats get_minibatch(data_stream, n_test_documents) # Discard test set # We will feed the classifier with mini-batches of 1000 documents; this means # we have at most 1000 docs in memory at any time. The smaller the document # batch, the bigger the relative overhead of the partial fit methods. minibatch_size = 1000 # Create the data_stream that parses Reuters SGML files and iterates on # documents as a stream. 
minibatch_iterators = iter_minibatches(data_stream, minibatch_size) total_vect_time = 0.0 # Main loop : iterate on mini-batches of examples for i, (X_train_text, y_train) in enumerate(minibatch_iterators): tick = time.time() X_train = vectorizer.transform(X_train_text) total_vect_time += time.time() - tick for cls_name, cls in partial_fit_classifiers.items(): tick = time.time() # update estimator with examples in the current mini-batch cls.partial_fit(X_train, y_train, classes=all_classes) # accumulate test accuracy stats cls_stats[cls_name]['total_fit_time'] += time.time() - tick cls_stats[cls_name]['n_train'] += X_train.shape[0] cls_stats[cls_name]['n_train_pos'] += sum(y_train) tick = time.time() cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test) cls_stats[cls_name]['prediction_time'] = time.time() - tick acc_history = (cls_stats[cls_name]['accuracy'], cls_stats[cls_name]['n_train']) cls_stats[cls_name]['accuracy_history'].append(acc_history) run_history = (cls_stats[cls_name]['accuracy'], total_vect_time + cls_stats[cls_name]['total_fit_time']) cls_stats[cls_name]['runtime_history'].append(run_history) if i % 3 == 0: print(progress(cls_name, cls_stats[cls_name])) if i % 3 == 0: print('\n') ############################################################################### # Plot results ############################################################################### def plot_accuracy(x, y, x_legend): """Plot accuracy as a function of x.""" x = np.array(x) y = np.array(y) plt.title('Classification accuracy as a function of %s' % x_legend) plt.xlabel('%s' % x_legend) plt.ylabel('Accuracy') plt.grid(True) plt.plot(x, y) rcParams['legend.fontsize'] = 10 cls_names = list(sorted(cls_stats.keys())) # Plot accuracy evolution plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with #examples accuracy, n_examples = zip(*stats['accuracy_history']) plot_accuracy(n_examples, accuracy, "training examples (#)") ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc='best') plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with runtime accuracy, runtime = zip(*stats['runtime_history']) plot_accuracy(runtime, accuracy, 'runtime (s)') ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc='best') # Plot fitting times plt.figure() fig = plt.gcf() cls_runtime = [] for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats['total_fit_time']) cls_runtime.append(total_vect_time) cls_names.append('Vectorization') bar_colors = ['b', 'g', 'r', 'c', 'm', 'y'] ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors) ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=10) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('runtime (s)') ax.set_title('Training Times') def autolabel(rectangles): """attach some text vi autolabel on rectangles.""" for rect in rectangles: height = rect.get_height() ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, '%.4f' % height, ha='center', va='bottom') autolabel(rectangles) plt.show() # Plot prediction times plt.figure() cls_runtime = [] cls_names = list(sorted(cls_stats.keys())) for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats['prediction_time']) cls_runtime.append(parsing_time) cls_names.append('Read/Parse\n+Feat.Extr.') cls_runtime.append(vectorizing_time) cls_names.append('Hashing\n+Vect.') ax = plt.subplot(111) 
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors) ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=8) plt.setp(plt.xticks()[1], rotation=30) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('runtime (s)') ax.set_title('Prediction Times (%d instances)' % n_test_documents) autolabel(rectangles) plt.show()
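
# A minimal, self-contained sketch of the out-of-core pattern used above --
# hash each text batch into a fixed-width feature space and update a linear
# model with partial_fit. Not part of the original example; the toy batches
# below are invented purely for illustration.
def _minimal_partial_fit_sketch():
    import numpy as np
    from sklearn.feature_extraction.text import HashingVectorizer
    from sklearn.linear_model import SGDClassifier

    hasher = HashingVectorizer(n_features=2 ** 18)      # stateless: no fit needed
    clf = SGDClassifier()
    classes = np.array([0, 1])
    toy_batches = [
        (["company acquired rival", "rainfall hit the coast"], [1, 0]),
        (["merger announced today", "team wins championship"], [1, 0]),
    ]
    for texts, labels in toy_batches:
        X_batch = hasher.transform(texts)                # same feature space every batch
        clf.partial_fit(X_batch, np.asarray(labels), classes=classes)
    return clf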
bsd-3-clause
chrsrds/scikit-learn
benchmarks/bench_plot_svd.py
11
2895
"""Benchmarks of Singular Value Decomposition (Exact and Approximate) The data is mostly low rank but is a fat infinite tail. """ import gc from time import time import numpy as np from collections import defaultdict from scipy.linalg import svd from sklearn.utils.extmath import randomized_svd from sklearn.datasets.samples_generator import make_low_rank_matrix def compute_bench(samples_range, features_range, n_iter=3, rank=50): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') X = make_low_rank_matrix(n_samples, n_features, effective_rank=rank, tail_strength=0.2) gc.collect() print("benchmarking scipy svd: ") tstart = time() svd(X, full_matrices=False) results['scipy svd'].append(time() - tstart) gc.collect() print("benchmarking scikit-learn randomized_svd: n_iter=0") tstart = time() randomized_svd(X, rank, n_iter=0) results['scikit-learn randomized_svd (n_iter=0)'].append( time() - tstart) gc.collect() print("benchmarking scikit-learn randomized_svd: n_iter=%d " % n_iter) tstart = time() randomized_svd(X, rank, n_iter=n_iter) results['scikit-learn randomized_svd (n_iter=%d)' % n_iter].append(time() - tstart) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(2, 1000, 4).astype(np.int) features_range = np.linspace(2, 1000, 4).astype(np.int) results = compute_bench(samples_range, features_range) label = 'scikit-learn singular value decomposition benchmark results' fig = plt.figure(label) ax = fig.gca(projection='3d') for c, (label, timings) in zip('rbg', sorted(results.items())): X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, color=c) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.legend() plt.show()
bsd-3-clause
doct-rubens/predator-prey-beetle-wasp
simul/world.py
1
11507
# -*- coding: utf-8 -*- # # World class file. A world is composed of: # - an initial number of flies and moths # - a set of laws that control the relation between its creatures # - how its creatures interact with each other for each time step (day) # - a support structure that allows the data of each time step to be saved # and later visualised # # Its main objective is to be able to: # - simulate the interaction of its creatures for a given number of steps # - be able to perform multiple simulations (not only once) import numpy as np import pandas as pd from simul.creatures import Creature from simul.creatures import Moth from simul.creatures import Fly class WonderfulWorld: # the class instantiation creates the world up to the 4th creation day only: not habitated; # the 5th-6th creation days come only when we call the run_world() method, # from its beginning to its end def __init__(self, universe, fil=None, mil=None): self.universe = universe self.instant = 0 self.n_moths = 0 self.n_flies = 0 # indexes the universe applied to the world to be the one # globally applied to all creatures Creature.universe = universe # initializes the simulation variables self.creatures = {Moth: [], Fly: []} self.children = {Moth: [], Fly: []} # initializes the data-saving variables self.iteration_data = {Moth: [], Fly: []} self.initial_lifespan = {Fly: fil, Moth: mil} # # initializes the world with: # - uniform distributions for the initial ages of moths and flies # with limits defined by the universe # - resets the current instant # - def initialize_world(self, n_steps): self.instant = 0 # reset the list of caterpillars, if it wasn't already empty del Moth.caterpillars[:] # initializes: # - ages based on a uniform distribution (for the moths) # '' 2 living days before their death (implemented # internally, for the flies) # - genders following the universe's male/female ratios self.creatures = { Moth: [Moth(0, age=np.random.randint(low=self.universe.initial_age_min[Moth], high=self.universe.initial_age_max[Moth] + 1), initial_lifespan=self.initial_lifespan[Moth]) for _ in range(self.n_moths)], Fly: [Fly(0, age=np.random.randint(low=self.universe.initial_age_min[Fly], high=self.universe.initial_age_max[Fly] + 1), initial_lifespan=self.initial_lifespan[Fly]) for _ in range(self.n_flies)] } # resets the newborn creatures arrays self.children = {Moth: [], Fly: []} # initializes the output log with the first # values for the creatures' features self.reset_iteration_log(n_steps) self.initialize_log() # self.save_iteration_log() # # kills the current creature. Previously, it automatically removed the # killed creature from the creatures list. But, that messed up the creature # processing, so we removed it and we clean up the fields only after the day ended # (we clean up the creatures lists only after we finished processing all creatures) @staticmethod def kill(creature): creature.kill() # # same thing that occurred with the creature killing method. We add the newborn # creatures only after we process all creatures from the current iteration. 
# Until then, the children is saved on a holding structure, the 'children' lists def procreate(self, creature): children = creature.children(gen=(self.instant + 1)) self.children[type(creature)] += children # self.creatures[type(creature)] += children # we update our iteration data (that we want to visualise when the simulation # ends) with these new numbers self.iteration_data[type(creature)]['parents'][self.instant] += 1 self.iteration_data[type(creature)]['newborn'][self.instant] += len(children) # # Checks if a creature should randomly die. If yes, kills it and # updates the iteration data. Returns a boolean variable indicating # whether the creature actually died or not def random_death(self, creature): if creature.random_death(): self.iteration_data[type(creature)]['randomly_killed'][self.instant] += 1 self.kill(creature) return True else: return False # # Checks if a creature should die of old age. If yes, kills it and # updates the iteration data. Returns a boolean variable indicating # whether the creature actually died or not def old_age_death(self, creature): if creature.old_age_death(): self.iteration_data[type(creature)]['old_age_killed'][self.instant] += 1 self.kill(creature) return True else: return False # # calculates the chances of a predation happening. It takes into # account the ratio (#caterpillars / #flies) multiplied by a # predefined (on the universe) coefficient def predation_happens(self): if self.creatures[Fly]: return np.random.uniform() < (self.universe.predation_coefficient * len(Moth.caterpillars) / len(self.creatures[Fly])) else: return False # # predation method. All of the verifications have been already # made and a predation is imminent on our system. We updated # the output data, sort out one of the caterpillars (there will # always be at least one, since the number of caterpillars goes into # the predation verification) and we kill it. # # Afterwards, we procreate the fly that just performed the predation. def predation(self, fly): self.iteration_data[Fly]['predation'][self.instant] += 1 self.iteration_data[Moth]['dead'][self.instant] += 1 # get the lucky bastard (caterpillars) by its horns lucky_caterpillar = Moth.caterpillars[np.random.randint(low=0, high=len(Moth.caterpillars))] # kill 'em self.kill(lucky_caterpillar) # new baby flies are born self.procreate(fly) # Checks what happened on the transition between the previous instant # (yesterday) and the current instant (today). 
    def single_step(self):

        # we reset the iteration log and update the current instant
        # self.reset_iteration_log()
        self.instant = self.instant + 1

        # fly stuff:
        # > random death
        # > death by old age
        #   - with its last breath, it parasitized a moth
        #     (or not, we roll the dice to check)
        # > otherwise nothing happens (increment age)
        for fly in self.creatures[Fly]:
            if not self.random_death(fly):
                if self.old_age_death(fly):
                    if fly.can_procreate():
                        if self.predation_happens():
                            self.predation(fly)
                else:
                    fly.increment_age()
            self.log_creature(fly)

        # update the flies and remove the moth corpses from the field before
        # checking on them
        self.update_list(Fly)
        self.update_list(Moth)

        # moth stuff:
        # > see if it died randomly
        # > see if died of old age
        #   - if it was female and fertile, procreates on death
        # > otherwise nothing happens (increment age)
        # I THINK DEATH BY PREDATION IS STILL MISSING HERE
        for moth in self.creatures[Moth]:
            if not self.random_death(moth):
                if self.old_age_death(moth):
                    if moth.can_procreate():
                        self.procreate(moth)
                else:
                    moth.increment_age()
            self.log_creature(moth)

        self.update_list(Moth)
        # self.save_iteration_log()

    # removes the dead and inserts the newborn creatures on the lists
    def update_list(self, creature_type):
        self.creatures[creature_type][:] = [creature for creature in self.creatures[creature_type]
                                            if creature.is_alive()] + self.children[creature_type]
        self.children[creature_type] = []

    # save the useful data on a dataframe for each generation
    def log_creature(self, creature):
        self.iteration_data[type(creature)]['living'][self.instant] += creature.is_alive()
        self.iteration_data[type(creature)]['dead'][self.instant] += creature.is_dead()
        self.iteration_data[type(creature)]['male'][self.instant] += creature.gender == 'm'
        self.iteration_data[type(creature)]['female'][self.instant] += creature.gender == 'f'
        self.iteration_data[type(creature)]['caterpillars'][self.instant] += (creature.is_caterpillar() and
                                                                              creature.is_alive())
        self.iteration_data[type(creature)]['adults'][self.instant] += creature.is_adult()

    # resets the iteration log
    def reset_iteration_log(self, n_steps):
        # A SINGLE NP.ZEROS() OBJECT IS BEING INSTANTIATED AND PASSED TO ALL OF THE DICTIONARY FIELDS;
        # THAT MEANS THAT ALL FIELDS WILL POINT TO THE SAME ARRAY AND WHEN WE CHANGE THE VALUE ON
        # ONE VIEW, ALL OF THE OTHERS WILL CHANGE AS WELL.
        # self.iteration_data = {
        #     Moth: dict.fromkeys(self.universe.recordable_data, []),
        #     Fly: dict.fromkeys(self.universe.recordable_data, [])
        # }
        #
        # now down here, we instantiate one array filled with zeroes for each field
        self.iteration_data = {
            Fly: {col: np.zeros(n_steps + 1) for col in self.universe.recordable_data},
            Moth: {col: np.zeros(n_steps + 1) for col in self.universe.recordable_data}
        }

    #
    # initializes the dataframe by creating it empty, logging in all the creatures
    # and saving on it as its first element
    def initialize_log(self):
        # self.data_log = pd.DataFrame(data=None, index=None, columns=self.universe.df_columns)
        for moth in self.creatures[Moth]:
            self.log_creature(moth)
        for fly in self.creatures[Fly]:
            self.log_creature(fly)

    #
    # runs the world with a given number of steps 'end_of_times'
    # by repeatedly executing the 'single_step()' method.
    #
    # Returns the dataframe with the outputs generated from the
    # simulation.
def run_world(self, n_flies, n_moths, end_of_times): self.n_moths = n_moths self.n_flies = n_flies self.initialize_world(end_of_times) for _ in range(end_of_times): self.single_step() return pd.DataFrame(data={(creature_type.name() + col): self.iteration_data[creature_type][col] for creature_type in [Fly, Moth] for col in self.universe.recordable_data}, index=range(end_of_times + 1), columns=self.universe.df_columns)
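
# A hedged usage sketch inferred from run_world() above; not part of the
# original module. The universe object is built elsewhere in this package and
# must expose the attributes used above (initial_age_min/max,
# predation_coefficient, recordable_data, df_columns, ...), so the import path
# and constructor below are placeholders rather than the real API:
#
#     from simul.universe import Universe      # hypothetical import path
#     universe = Universe()                    # assumed container of the simulation laws
#     world = WonderfulWorld(universe)
#     df = world.run_world(n_flies=20, n_moths=100, end_of_times=365)
#     print(df.tail())                         # one row per simulated day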
bsd-2-clause
winklerand/pandas
pandas/core/sparse/frame.py
1
35030
""" Data structures for sparse float data. Life is made simpler by dealing only with float64 data """ from __future__ import division # pylint: disable=E1101,E1103,W0231,E0202 import warnings from pandas.compat import lmap from pandas import compat import numpy as np from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.cast import maybe_upcast, find_common_type from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse from pandas.core.common import _try_sort from pandas.compat.numpy import function as nv from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.series import Series from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray, _default_index) import pandas.core.algorithms as algos from pandas.core.internals import (BlockManager, create_block_manager_from_arrays) import pandas.core.generic as generic from pandas.core.sparse.series import SparseSeries, SparseArray from pandas._libs.sparse import BlockIndex, get_blocks from pandas.util._decorators import Appender import pandas.core.ops as ops _shared_doc_kwargs = dict(klass='SparseDataFrame') class SparseDataFrame(DataFrame): """ DataFrame containing sparse floating point data in the form of SparseSeries objects Parameters ---------- data : same types as can be passed to DataFrame or scipy.sparse.spmatrix index : array-like, optional column : array-like, optional default_kind : {'block', 'integer'}, default 'block' Default sparse kind for converting Series to SparseSeries. Will not override SparseSeries passed into constructor default_fill_value : float Default fill_value for converting Series to SparseSeries (default: nan). Will not override SparseSeries passed in. """ _subtyp = 'sparse_frame' def __init__(self, data=None, index=None, columns=None, default_kind=None, default_fill_value=None, dtype=None, copy=False): # pick up the defaults from the Sparse structures if isinstance(data, SparseDataFrame): if index is None: index = data.index if columns is None: columns = data.columns if default_fill_value is None: default_fill_value = data.default_fill_value if default_kind is None: default_kind = data.default_kind elif isinstance(data, (SparseSeries, SparseArray)): if index is None: index = data.index if default_fill_value is None: default_fill_value = data.fill_value if columns is None and hasattr(data, 'name'): columns = [data.name] if columns is None: raise Exception("cannot pass a series w/o a name or columns") data = {columns[0]: data} if default_fill_value is None: default_fill_value = np.nan if default_kind is None: default_kind = 'block' self._default_kind = default_kind self._default_fill_value = default_fill_value if is_scipy_sparse(data): mgr = self._init_spmatrix(data, index, columns, dtype=dtype, fill_value=default_fill_value) elif isinstance(data, dict): mgr = self._init_dict(data, index, columns, dtype=dtype) elif isinstance(data, (np.ndarray, list)): mgr = self._init_matrix(data, index, columns, dtype=dtype) elif isinstance(data, SparseDataFrame): mgr = self._init_mgr(data._data, dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, DataFrame): mgr = self._init_dict(data, data.index, data.columns, dtype=dtype) elif isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif data is None: data = DataFrame() if index is None: index = Index([]) else: index = _ensure_index(index) if columns is None: columns = Index([]) else: for c in columns: data[c] 
= SparseArray(np.nan, index=index, kind=self._default_kind, fill_value=self._default_fill_value) mgr = to_manager(data, columns, index) if dtype is not None: mgr = mgr.astype(dtype) generic.NDFrame.__init__(self, mgr) @property def _constructor(self): return SparseDataFrame _constructor_sliced = SparseSeries def _init_dict(self, data, index, columns, dtype=None): # pre-filter out columns if we passed it if columns is not None: columns = _ensure_index(columns) data = {k: v for k, v in compat.iteritems(data) if k in columns} else: columns = Index(_try_sort(list(data.keys()))) if index is None: index = extract_index(list(data.values())) sp_maker = lambda x: SparseArray(x, kind=self._default_kind, fill_value=self._default_fill_value, copy=True, dtype=dtype) sdict = {} for k, v in compat.iteritems(data): if isinstance(v, Series): # Force alignment, no copy necessary if not v.index.equals(index): v = v.reindex(index) if not isinstance(v, SparseSeries): v = sp_maker(v.values) elif isinstance(v, SparseArray): v = v.copy() else: if isinstance(v, dict): v = [v.get(i, np.nan) for i in index] v = sp_maker(v) sdict[k] = v # TODO: figure out how to handle this case, all nan's? # add in any other columns we want to have (completeness) nan_arr = np.empty(len(index), dtype='float64') nan_arr.fill(np.nan) nan_arr = sp_maker(nan_arr) sdict.update((c, nan_arr) for c in columns if c not in sdict) return to_manager(sdict, columns, index) def _init_matrix(self, data, index, columns, dtype=None): """ Init self from ndarray or list of lists """ data = _prep_ndarray(data, copy=False) index, columns = self._prep_index(data, index, columns) data = {idx: data[:, i] for i, idx in enumerate(columns)} return self._init_dict(data, index, columns, dtype) def _init_spmatrix(self, data, index, columns, dtype=None, fill_value=None): """ Init self from scipy.sparse matrix """ index, columns = self._prep_index(data, index, columns) data = data.tocoo() N = len(index) # Construct a dict of SparseSeries sdict = {} values = Series(data.data, index=data.row, copy=False) for col, rowvals in values.groupby(data.col): # get_blocks expects int32 row indices in sorted order rowvals = rowvals.sort_index() rows = rowvals.index.values.astype(np.int32) blocs, blens = get_blocks(rows) sdict[columns[col]] = SparseSeries( rowvals.values, index=index, fill_value=fill_value, sparse_index=BlockIndex(N, blocs, blens)) # Add any columns that were empty and thus not grouped on above sdict.update({column: SparseSeries(index=index, fill_value=fill_value, sparse_index=BlockIndex(N, [], [])) for column in columns if column not in sdict}) return self._init_dict(sdict, index, columns, dtype) def _prep_index(self, data, index, columns): N, K = data.shape if index is None: index = _default_index(N) if columns is None: columns = _default_index(K) if len(columns) != K: raise ValueError('Column length mismatch: {columns} vs. {K}' .format(columns=len(columns), K=K)) if len(index) != N: raise ValueError('Index length mismatch: {index} vs. {N}' .format(index=len(index), N=N)) return index, columns def to_coo(self): """ Return the contents of the frame as a sparse SciPy COO matrix. .. versionadded:: 0.20.0 Returns ------- coo_matrix : scipy.sparse.spmatrix If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. Notes ----- The dtype will be the lowest-common-denominator type (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. e.g. 
If the dtypes are float16 and float32, dtype will be upcast to float32. By numpy.find_common_type convention, mixing int64 and and uint64 will result in a float64 dtype. """ try: from scipy.sparse import coo_matrix except ImportError: raise ImportError('Scipy is not installed') dtype = find_common_type(self.dtypes) cols, rows, datas = [], [], [] for col, name in enumerate(self): s = self[name] row = s.sp_index.to_int_index().indices cols.append(np.repeat(col, len(row))) rows.append(row) datas.append(s.sp_values.astype(dtype, copy=False)) cols = np.concatenate(cols) rows = np.concatenate(rows) datas = np.concatenate(datas) return coo_matrix((datas, (rows, cols)), shape=self.shape) def __array_wrap__(self, result): return self._constructor( result, index=self.index, columns=self.columns, default_kind=self._default_kind, default_fill_value=self._default_fill_value).__finalize__(self) def __getstate__(self): # pickling return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data, _default_fill_value=self._default_fill_value, _default_kind=self._default_kind) def _unpickle_sparse_frame_compat(self, state): """ original pickle format """ series, cols, idx, fv, kind = state if not isinstance(cols, Index): # pragma: no cover from pandas.io.pickle import _unpickle_array columns = _unpickle_array(cols) else: columns = cols if not isinstance(idx, Index): # pragma: no cover from pandas.io.pickle import _unpickle_array index = _unpickle_array(idx) else: index = idx series_dict = DataFrame() for col, (sp_index, sp_values) in compat.iteritems(series): series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index, fill_value=fv) self._data = to_manager(series_dict, columns, index) self._default_fill_value = fv self._default_kind = kind def to_dense(self): """ Convert to dense DataFrame Returns ------- df : DataFrame """ data = {k: v.to_dense() for k, v in compat.iteritems(self)} return DataFrame(data, index=self.index, columns=self.columns) def _apply_columns(self, func): """ get new SparseDataFrame applying func to each columns """ new_data = {} for col, series in compat.iteritems(self): new_data[col] = func(series) return self._constructor( data=new_data, index=self.index, columns=self.columns, default_fill_value=self.default_fill_value).__finalize__(self) def astype(self, dtype): return self._apply_columns(lambda x: x.astype(dtype)) def copy(self, deep=True): """ Make a copy of this SparseDataFrame """ result = super(SparseDataFrame, self).copy(deep=deep) result._default_fill_value = self._default_fill_value result._default_kind = self._default_kind return result @property def default_fill_value(self): return self._default_fill_value @property def default_kind(self): return self._default_kind @property def density(self): """ Ratio of non-sparse points to total (dense) data points represented in the frame """ tot_nonsparse = sum(ser.sp_index.npoints for _, ser in compat.iteritems(self)) tot = len(self.index) * len(self.columns) return tot_nonsparse / float(tot) def fillna(self, value=None, method=None, axis=0, inplace=False, limit=None, downcast=None): new_self = super(SparseDataFrame, self).fillna(value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast) if not inplace: self = new_self # set the fill value if we are filling as a scalar with nothing special # going on if (value is not None and value == value and method is None and limit is None): self._default_fill_value = value if not inplace: return self # 
---------------------------------------------------------------------- # Support different internal representation of SparseDataFrame def _sanitize_column(self, key, value, **kwargs): """ Creates a new SparseArray from the input value. Parameters ---------- key : object value : scalar, Series, or array-like kwargs : dict Returns ------- sanitized_column : SparseArray """ sp_maker = lambda x, index=None: SparseArray( x, index=index, fill_value=self._default_fill_value, kind=self._default_kind) if isinstance(value, SparseSeries): clean = value.reindex(self.index).as_sparse_array( fill_value=self._default_fill_value, kind=self._default_kind) elif isinstance(value, SparseArray): if len(value) != len(self.index): raise AssertionError('Length of values does not match ' 'length of index') clean = value elif hasattr(value, '__iter__'): if isinstance(value, Series): clean = value.reindex(self.index) if not isinstance(value, SparseSeries): clean = sp_maker(clean) else: if len(value) != len(self.index): raise AssertionError('Length of values does not match ' 'length of index') clean = sp_maker(value) # Scalar else: clean = sp_maker(value, self.index) # always return a SparseArray! return clean def __getitem__(self, key): """ Retrieve column or slice from DataFrame """ if isinstance(key, slice): date_rng = self.index[key] return self.reindex(date_rng) elif isinstance(key, (np.ndarray, list, Series)): return self._getitem_array(key) else: return self._get_item_cache(key) def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- value : scalar value """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(index, col, takeable=takeable) def _get_value(self, index, col, takeable=False): if takeable is True: series = self._iget_item_cache(col) else: series = self._get_item_cache(col) return series._get_value(index, takeable=takeable) _get_value.__doc__ = get_value.__doc__ def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- index : row label col : column label value : scalar value takeable : interpret the index/col as indexers, default False Notes ----- This method *always* returns a new object. It is currently not particularly efficient (and potentially very expensive) but is provided for API compatibility with DataFrame Returns ------- frame : DataFrame """ warnings.warn("set_value is deprecated and will be removed " "in a future release. 
Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(index, col, value, takeable=takeable) def _set_value(self, index, col, value, takeable=False): dense = self.to_dense()._set_value( index, col, value, takeable=takeable) return dense.to_sparse(kind=self._default_kind, fill_value=self._default_fill_value) _set_value.__doc__ = set_value.__doc__ def _slice(self, slobj, axis=0, kind=None): if axis == 0: new_index = self.index[slobj] new_columns = self.columns else: new_index = self.index new_columns = self.columns[slobj] return self.reindex(index=new_index, columns=new_columns) def xs(self, key, axis=0, copy=False): """ Returns a row (cross-section) from the SparseDataFrame as a Series object. Parameters ---------- key : some index contained in the index Returns ------- xs : Series """ if axis == 1: data = self[key] return data i = self.index.get_loc(key) data = self.take([i]).get_values()[0] return Series(data, index=self.columns) # ---------------------------------------------------------------------- # Arithmetic-related methods def _combine_frame(self, other, func, fill_value=None, level=None, try_cast=True): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns if level is not None: raise NotImplementedError("'level' argument is not supported") if self.empty and other.empty: return self._constructor(index=new_index).__finalize__(self) new_data = {} new_fill_value = None if fill_value is not None: # TODO: be a bit more intelligent here for col in new_columns: if col in this and col in other: dleft = this[col].to_dense() dright = other[col].to_dense() result = dleft._binop(dright, func, fill_value=fill_value) result = result.to_sparse(fill_value=this[col].fill_value) new_data[col] = result else: for col in new_columns: if col in this and col in other: new_data[col] = func(this[col], other[col]) # if the fill values are the same use them? 
or use a valid one other_fill_value = getattr(other, 'default_fill_value', np.nan) if self.default_fill_value == other_fill_value: new_fill_value = self.default_fill_value elif np.isnan(self.default_fill_value) and not np.isnan( other_fill_value): new_fill_value = other_fill_value elif not np.isnan(self.default_fill_value) and np.isnan( other_fill_value): new_fill_value = self.default_fill_value return self._constructor(data=new_data, index=new_index, columns=new_columns, default_fill_value=new_fill_value ).__finalize__(self) def _combine_match_index(self, other, func, level=None, fill_value=None, try_cast=True): new_data = {} if fill_value is not None: raise NotImplementedError("'fill_value' argument is not supported") if level is not None: raise NotImplementedError("'level' argument is not supported") new_index = self.index.union(other.index) this = self if self.index is not new_index: this = self.reindex(new_index) if other.index is not new_index: other = other.reindex(new_index) for col, series in compat.iteritems(this): new_data[col] = func(series.values, other.values) # fill_value is a function of our operator if isna(other.fill_value) or isna(self.default_fill_value): fill_value = np.nan else: fill_value = func(np.float64(self.default_fill_value), np.float64(other.fill_value)) return self._constructor( new_data, index=new_index, columns=self.columns, default_fill_value=fill_value).__finalize__(self) def _combine_match_columns(self, other, func, level=None, fill_value=None, try_cast=True): # patched version of DataFrame._combine_match_columns to account for # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series, # where 3.0 is numpy.float64 and series is a SparseSeries. Still # possible for this to happen, which is bothersome if fill_value is not None: raise NotImplementedError("'fill_value' argument is not supported") if level is not None: raise NotImplementedError("'level' argument is not supported") new_data = {} union = intersection = self.columns if not union.equals(other.index): union = other.index.union(self.columns) intersection = other.index.intersection(self.columns) for col in intersection: new_data[col] = func(self[col], float(other[col])) return self._constructor( new_data, index=self.index, columns=union, default_fill_value=self.default_fill_value).__finalize__(self) def _combine_const(self, other, func, errors='raise', try_cast=True): return self._apply_columns(lambda x: func(x, other)) def _reindex_index(self, index, method, copy, level, fill_value=np.nan, limit=None, takeable=False): if level is not None: raise TypeError('Reindex by level not supported for sparse') if self.index.equals(index): if copy: return self.copy() else: return self if len(self.index) == 0: return self._constructor( index=index, columns=self.columns).__finalize__(self) indexer = self.index.get_indexer(index, method, limit=limit) indexer = _ensure_platform_int(indexer) mask = indexer == -1 need_mask = mask.any() new_series = {} for col, series in self.iteritems(): if mask.all(): continue values = series.values # .take returns SparseArray new = values.take(indexer) if need_mask: new = new.values # convert integer to float if necessary. 
need to do a lot # more than that, handle boolean etc also new, fill_value = maybe_upcast(new, fill_value=fill_value) np.putmask(new, mask, fill_value) new_series[col] = new return self._constructor( new_series, index=index, columns=self.columns, default_fill_value=self._default_fill_value).__finalize__(self) def _reindex_columns(self, columns, method, copy, level, fill_value=None, limit=None, takeable=False): if level is not None: raise TypeError('Reindex by level not supported for sparse') if notna(fill_value): raise NotImplementedError("'fill_value' argument is not supported") if limit: raise NotImplementedError("'limit' argument is not supported") if method is not None: raise NotImplementedError("'method' argument is not supported") # TODO: fill value handling sdict = {k: v for k, v in compat.iteritems(self) if k in columns} return self._constructor( sdict, index=self.index, columns=columns, default_fill_value=self._default_fill_value).__finalize__(self) def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, limit=None, copy=False, allow_dups=False): if method is not None or limit is not None: raise NotImplementedError("cannot reindex with a method or limit " "with sparse") if fill_value is None: fill_value = np.nan index, row_indexer = reindexers.get(0, (None, None)) columns, col_indexer = reindexers.get(1, (None, None)) if columns is None: columns = self.columns new_arrays = {} for col in columns: if col not in self: continue if row_indexer is not None: new_arrays[col] = algos.take_1d(self[col].get_values(), row_indexer, fill_value=fill_value) else: new_arrays[col] = self[col] return self._constructor(new_arrays, index=index, columns=columns).__finalize__(self) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): if on is not None: raise NotImplementedError("'on' keyword parameter is not yet " "implemented") return self._join_index(other, how, lsuffix, rsuffix) def _join_index(self, other, how, lsuffix, rsuffix): if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = SparseDataFrame( {other.name: other}, default_fill_value=self._default_fill_value) join_index = self.index.join(other.index, how=how) this = self.reindex(join_index) other = other.reindex(join_index) this, other = this._maybe_rename_join(other, lsuffix, rsuffix) from pandas import concat return concat([this, other], axis=1, verify_integrity=True) def _maybe_rename_join(self, other, lsuffix, rsuffix): to_rename = self.columns.intersection(other.columns) if len(to_rename) > 0: if not lsuffix and not rsuffix: raise ValueError('columns overlap but no suffix specified: ' '{to_rename}'.format(to_rename=to_rename)) def lrenamer(x): if x in to_rename: return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) return x def rrenamer(x): if x in to_rename: return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) return x this = self.rename(columns=lrenamer) other = other.rename(columns=rrenamer) else: this = self return this, other def transpose(self, *args, **kwargs): """ Returns a DataFrame with the rows/columns switched. 
""" nv.validate_transpose(args, kwargs) return self._constructor( self.values.T, index=self.columns, columns=self.index, default_fill_value=self._default_fill_value, default_kind=self._default_kind).__finalize__(self) T = property(transpose) @Appender(DataFrame.count.__doc__) def count(self, axis=0, **kwds): if axis is None: axis = self._stat_axis_number return self.apply(lambda x: x.count(), axis=axis) def cumsum(self, axis=0, *args, **kwargs): """ Return SparseDataFrame of cumulative sums over requested axis. Parameters ---------- axis : {0, 1} 0 for row-wise, 1 for column-wise Returns ------- y : SparseDataFrame """ nv.validate_cumsum(args, kwargs) if axis is None: axis = self._stat_axis_number return self.apply(lambda x: x.cumsum(), axis=axis) @Appender(generic._shared_docs['isna']) def isna(self): return self._apply_columns(lambda x: x.isna()) isnull = isna @Appender(generic._shared_docs['notna']) def notna(self): return self._apply_columns(lambda x: x.notna()) notnull = notna def apply(self, func, axis=0, broadcast=False, reduce=False): """ Analogous to DataFrame.apply, for SparseDataFrame Parameters ---------- func : function Function to apply to each column axis : {0, 1, 'index', 'columns'} broadcast : bool, default False For aggregation functions, return object of same size with values propagated Returns ------- applied : Series or SparseDataFrame """ if not len(self.columns): return self axis = self._get_axis_number(axis) if isinstance(func, np.ufunc): new_series = {} for k, v in compat.iteritems(self): applied = func(v) applied.fill_value = func(v.fill_value) new_series[k] = applied return self._constructor( new_series, index=self.index, columns=self.columns, default_fill_value=self._default_fill_value, default_kind=self._default_kind).__finalize__(self) else: if not broadcast: return self._apply_standard(func, axis, reduce=reduce) else: return self._apply_broadcast(func, axis) def applymap(self, func): """ Apply a function to a DataFrame that is intended to operate elementwise, i.e. like doing map(func, series) for each series in the DataFrame Parameters ---------- func : function Python function, returns a single value from a single value Returns ------- applied : DataFrame """ return self.apply(lambda x: lmap(func, x)) def to_manager(sdf, columns, index): """ create and return the block manager from a dataframe of series, columns, index """ # from BlockManager perspective axes = [_ensure_index(columns), _ensure_index(index)] return create_block_manager_from_arrays( [sdf[c] for c in columns], columns, axes) def stack_sparse_frame(frame): """ Only makes sense when fill_value is NaN """ lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)] nobs = sum(lengths) # this is pretty fast minor_labels = np.repeat(np.arange(len(frame.columns)), lengths) inds_to_concat = [] vals_to_concat = [] # TODO: Figure out whether this can be reached. # I think this currently can't be reached because you can't build a # SparseDataFrame with a non-np.NaN fill value (fails earlier). 
for _, series in compat.iteritems(frame): if not np.isnan(series.fill_value): raise TypeError('This routine assumes NaN fill value') int_index = series.sp_index.to_int_index() inds_to_concat.append(int_index.indices) vals_to_concat.append(series.sp_values) major_labels = np.concatenate(inds_to_concat) stacked_values = np.concatenate(vals_to_concat) index = MultiIndex(levels=[frame.index, frame.columns], labels=[major_labels, minor_labels], verify_integrity=False) lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index, columns=['foo']) return lp.sort_index(level=0) def homogenize(series_dict): """ Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex corresponding to the locations where they all have data Parameters ---------- series_dict : dict or DataFrame Notes ----- Using the dumbest algorithm I could think of. Should put some more thought into this Returns ------- homogenized : dict of SparseSeries """ index = None need_reindex = False for _, series in compat.iteritems(series_dict): if not np.isnan(series.fill_value): raise TypeError('this method is only valid with NaN fill values') if index is None: index = series.sp_index elif not series.sp_index.equals(index): need_reindex = True index = index.intersect(series.sp_index) if need_reindex: output = {} for name, series in compat.iteritems(series_dict): if not series.sp_index.equals(index): series = series.sparse_reindex(index) output[name] = series else: output = series_dict return output # use unaccelerated ops for sparse objects ops.add_flex_arithmetic_methods(SparseDataFrame, use_numexpr=False, **ops.frame_flex_funcs) ops.add_special_arithmetic_methods(SparseDataFrame, use_numexpr=False, **ops.frame_special_funcs)
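
# A minimal usage sketch of this (pre-1.0, since-removed) SparseDataFrame API,
# with invented toy values; not part of the original module.
def _sparse_frame_sketch():
    import pandas as pd

    dense = pd.DataFrame({'a': [0.0, 1.0, 0.0, 0.0],
                          'b': [0.0, 0.0, 2.0, 0.0]})
    sdf = dense.to_sparse(fill_value=0.0)   # -> SparseDataFrame in this pandas vintage
    print(sdf.density)                      # share of explicitly stored points
    coo = sdf.to_coo()                      # scipy.sparse COO matrix (requires scipy)
    return sdf.to_dense(), coo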
bsd-3-clause
nikitasingh981/scikit-learn
examples/svm/plot_svm_nonlinear.py
62
1119
""" ============== Non-linear SVM ============== Perform binary classification using non-linear SVC with RBF kernel. The target to predict is a XOR of the inputs. The color map illustrates the decision function learned by the SVC. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm xx, yy = np.meshgrid(np.linspace(-3, 3, 500), np.linspace(-3, 3, 500)) np.random.seed(0) X = np.random.randn(300, 2) Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0) # fit the model clf = svm.NuSVC() clf.fit(X, Y) # plot the decision function for each datapoint on the grid Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plt.imshow(Z, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto', origin='lower', cmap=plt.cm.PuOr_r) contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2, linetypes='--') plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired, edgecolors='k') plt.xticks(()) plt.yticks(()) plt.axis([-3, 3, -3, 3]) plt.show()
bsd-3-clause
vigilv/scikit-learn
sklearn/tests/test_base.py
216
7045
# Author: Gael Varoquaux # License: BSD 3 clause import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raises from sklearn.base import BaseEstimator, clone, is_classifier from sklearn.svm import SVC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.utils import deprecated ############################################################################# # A few test classes class MyEstimator(BaseEstimator): def __init__(self, l1=0, empty=None): self.l1 = l1 self.empty = empty class K(BaseEstimator): def __init__(self, c=None, d=None): self.c = c self.d = d class T(BaseEstimator): def __init__(self, a=None, b=None): self.a = a self.b = b class DeprecatedAttributeEstimator(BaseEstimator): def __init__(self, a=None, b=None): self.a = a if b is not None: DeprecationWarning("b is deprecated and renamed 'a'") self.a = b @property @deprecated("Parameter 'b' is deprecated and renamed to 'a'") def b(self): return self._b class Buggy(BaseEstimator): " A buggy estimator that does not set its parameters right. " def __init__(self, a=None): self.a = 1 class NoEstimator(object): def __init__(self): pass def fit(self, X=None, y=None): return self def predict(self, X=None): return None class VargEstimator(BaseEstimator): """Sklearn estimators shouldn't have vargs.""" def __init__(self, *vargs): pass ############################################################################# # The tests def test_clone(): # Tests that clone creates a correct deep copy. # We create an estimator, make a copy of its original state # (which, in this case, is the current state of the estimator), # and check that the obtained copy is a correct deep copy. from sklearn.feature_selection import SelectFpr, f_classif selector = SelectFpr(f_classif, alpha=0.1) new_selector = clone(selector) assert_true(selector is not new_selector) assert_equal(selector.get_params(), new_selector.get_params()) selector = SelectFpr(f_classif, alpha=np.zeros((10, 2))) new_selector = clone(selector) assert_true(selector is not new_selector) def test_clone_2(): # Tests that clone doesn't copy everything. # We first create an estimator, give it an own attribute, and # make a copy of its original state. Then we check that the copy doesn't # have the specific attribute we manually added to the initial estimator. from sklearn.feature_selection import SelectFpr, f_classif selector = SelectFpr(f_classif, alpha=0.1) selector.own_attribute = "test" new_selector = clone(selector) assert_false(hasattr(new_selector, "own_attribute")) def test_clone_buggy(): # Check that clone raises an error on buggy estimators. 
buggy = Buggy() buggy.a = 2 assert_raises(RuntimeError, clone, buggy) no_estimator = NoEstimator() assert_raises(TypeError, clone, no_estimator) varg_est = VargEstimator() assert_raises(RuntimeError, clone, varg_est) def test_clone_empty_array(): # Regression test for cloning estimators with empty arrays clf = MyEstimator(empty=np.array([])) clf2 = clone(clf) assert_array_equal(clf.empty, clf2.empty) clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]]))) clf2 = clone(clf) assert_array_equal(clf.empty.data, clf2.empty.data) def test_clone_nan(): # Regression test for cloning estimators with default parameter as np.nan clf = MyEstimator(empty=np.nan) clf2 = clone(clf) assert_true(clf.empty is clf2.empty) def test_repr(): # Smoke test the repr of the base estimator. my_estimator = MyEstimator() repr(my_estimator) test = T(K(), K()) assert_equal( repr(test), "T(a=K(c=None, d=None), b=K(c=None, d=None))" ) some_est = T(a=["long_params"] * 1000) assert_equal(len(repr(some_est)), 415) def test_str(): # Smoke test the str of the base estimator my_estimator = MyEstimator() str(my_estimator) def test_get_params(): test = T(K(), K()) assert_true('a__d' in test.get_params(deep=True)) assert_true('a__d' not in test.get_params(deep=False)) test.set_params(a__d=2) assert_true(test.a.d == 2) assert_raises(ValueError, test.set_params, a__a=2) def test_get_params_deprecated(): # deprecated attribute should not show up as params est = DeprecatedAttributeEstimator(a=1) assert_true('a' in est.get_params()) assert_true('a' in est.get_params(deep=True)) assert_true('a' in est.get_params(deep=False)) assert_true('b' not in est.get_params()) assert_true('b' not in est.get_params(deep=True)) assert_true('b' not in est.get_params(deep=False)) def test_is_classifier(): svc = SVC() assert_true(is_classifier(svc)) assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]}))) assert_true(is_classifier(Pipeline([('svc', svc)]))) assert_true(is_classifier(Pipeline([('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))]))) def test_set_params(): # test nested estimator parameter setting clf = Pipeline([("svc", SVC())]) # non-existing parameter in svc assert_raises(ValueError, clf.set_params, svc__stupid_param=True) # non-existing parameter of pipeline assert_raises(ValueError, clf.set_params, svm__stupid_param=True) # we don't currently catch if the things in pipeline are estimators # bad_pipeline = Pipeline([("bad", NoEstimator())]) # assert_raises(AttributeError, bad_pipeline.set_params, # bad__stupid_param=True) def test_score_sample_weight(): from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn import datasets rng = np.random.RandomState(0) # test both ClassifierMixin and RegressorMixin estimators = [DecisionTreeClassifier(max_depth=2), DecisionTreeRegressor(max_depth=2)] sets = [datasets.load_iris(), datasets.load_boston()] for est, ds in zip(estimators, sets): est.fit(ds.data, ds.target) # generate random sample weights sample_weight = rng.randint(1, 10, size=len(ds.target)) # check that the score with and without sample weights are different assert_not_equal(est.score(ds.data, ds.target), est.score(ds.data, ds.target, sample_weight=sample_weight), msg="Unweighted and weighted scores " "are unexpectedly equal")
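
# A small sketch of the clone() contract exercised by the tests above:
# constructor parameters are copied into a fresh estimator, while attributes set
# after construction are not. Toy values are invented for illustration; this is
# not part of the original test module.
def _clone_sketch():
    from sklearn.base import clone
    from sklearn.svm import SVC

    est = SVC(C=10.0)
    est.extra_attribute = "set after construction"
    est2 = clone(est)
    assert est2.get_params()['C'] == 10.0
    assert not hasattr(est2, 'extra_attribute')
    return est2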
bsd-3-clause
hyrise/hyrise
scripts/plot_performance_over_commit_range.py
1
2039
#!/usr/bin/python3

# Given a range of benchmark jsons created by run_benchmarks_over_commit_range.sh, plots a graph showing the
# performance development over time

import matplotlib.pyplot as plt
import os
import pandas as pd
from pydriller import RepositoryMining
import re
import sys

if len(sys.argv) != 4:
    exit("Usage: " + sys.argv[0] + " <path to Hyrise> <first commit> <last commit>")

results = []
column_names = ["Commit"]

for commit in RepositoryMining(sys.argv[1], from_commit=sys.argv[2], to_commit=sys.argv[3]).traverse_commits():
    result = []
    result.append(commit.hash[:6] + " " + commit.msg.partition("\n")[0])

    json_path = "auto_" + commit.hash + ".json"
    if not os.path.exists(json_path):
        print("JSON file not found: " + json_path)
        for column in column_names:
            result.append(None)
        continue

    with open(json_path) as file:
        # If column headers are unset, set them
        if len(column_names) == 1:
            for name in re.findall(r'name": "(.*)"', file.read()):
                column_names.append(name)
            file.seek(0)

        # Quick and dirty way to retrieve the throughput. This is "better" than parsing the JSON
        # because while the JSON has changed over time, this field has been available for a long time.
        for items_per_second in re.findall(r'items_per_second": ([0-9]+\.[0-9]*)', file.read()):
            result.append(float(items_per_second))

    if len(result) != len(column_names):
        exit("Mismatching number of benchmarks, starting with commit " + commit.hash)

    results.append(result)

df = pd.DataFrame(results)
df.columns = column_names

# Normalize dataframe so that all values are relative to the first throughput of that benchmark
df.iloc[:, 1:] = df.iloc[:, 1:].apply(lambda x: x / x[0], axis=0)

print(df)

df.plot.line(x="Commit", figsize=(64, 18))
plt.xticks(df.index, df["Commit"], rotation=90)
plt.ylabel("Throughput relative to first commit")
plt.tight_layout()
plt.savefig("output.pdf")
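
# Example invocation (assuming the per-commit benchmark JSONs, auto_<hash>.json,
# were already produced in the working directory by
# run_benchmarks_over_commit_range.sh; the repository path and commit hashes
# below are placeholders):
#
#     ./plot_performance_over_commit_range.py ~/hyrise 1a2b3c4d 9f8e7d6c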
mit
swathimatsa123/wifimonitor
Network.py
1
2832
import Device from scapy.all import * import threading from pylab import * import matplotlib.pyplot as plt import time import os class Network: num_devices = 0 device_list = {} bwUsageHistory = {} def Join_device(self, mac_addr, ip_addr=None): dev = Device.device(mac_addr, ip_addr) self.device_list[mac_addr] = dev print 'Join the device %s' %(mac_addr) def Does_Device_Exist (self, mac_addr) : if self.device_list.has_key(mac_addr): return True else: return False def Delete_device(self, mac_addr): del self.device_list[mac_addr] print 'Delete the device' def plotUSage (self, sleepCount ) : xdata = [] ydata = [] positions = [] count = 0 totalBandwidth = 0 for mac, device in self.device_list.items(): xdata.append(mac) deviceBandwidth = device.get_bandwidth() device.reset_bandwidth() totalBandwidth = totalBandwidth + deviceBandwidth ydata.append(deviceBandwidth) positions.append(count+0.5) count = count + 0.5 bar(positions,ydata, align='center') xticks(positions, xdata) ylabel('Bandwidth') title('Total bandwidth usage in last 5 min') grid(True) savefig("usage5Min.png") plt.clf() print 'Plot image is ready. Name %s' %('usage5Min.png') if (sleepCount % 6 == 0): print '## Printing total usage %s' %(totalBandwidth) self.bwUsageHistory[sleepCount/6] = totalBandwidth times = self.bwUsageHistory.keys() totals = self.bwUsageHistory.values() plt.plot(times, totals) grid(True) os.system('/bin/mv totalUsage.png totalUsageSofar.png') savefig("totalUsage.png") plt.clf() def Display_devices(self): pass def Process_packet(self, pkt = None): for key, value in pkt.items(): self.device_list[key].consume_bandwidth(value) def PktHeaderParser (pkt): if pkt.haslayer(Dot11): wrlsHdr = pkt[Dot11] dst = wrlsHdr.fields['addr1'] src = wrlsHdr.fields['addr2'] addr3 = wrlsHdr.fields['addr3'] if src != None: if not network.Does_Device_Exist (src): network.Join_device (src) network.Process_packet({ src: len(pkt)}) def startMonitor(): sniff(iface="mon0", prn = PktHeaderParser) global network if __name__=="__main__": network = Network() t= threading.Thread(target=startMonitor) t.start() sleepCount = 0 while True: time.sleep(3) sleepCount +=1 network.plotUSage(sleepCount)
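
# The Device module imported above is not shown in this file; the class below is
# an inferred sketch of the minimal interface Network relies on (constructor,
# consume_bandwidth, get_bandwidth, reset_bandwidth), not the actual Device.py
# implementation. Bandwidth is simply the byte count accumulated since the last
# reset.
class _DeviceSketch(object):

    def __init__(self, mac_addr, ip_addr=None):
        self.mac_addr = mac_addr
        self.ip_addr = ip_addr
        self.bandwidth = 0

    def consume_bandwidth(self, nbytes):
        # called with len(pkt) for every sniffed frame from this MAC
        self.bandwidth += nbytes

    def get_bandwidth(self):
        return self.bandwidth

    def reset_bandwidth(self):
        self.bandwidth = 0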
apache-2.0
ruohoruotsi/Wavelet-Tree-Synth
nnet/VRNN_nips2015/datasets/iamondb.py
1
3778
import ipdb import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt import numpy as np import scipy.signal import theano.tensor as T from cle.cle.data import TemporalSeries from cle.cle.data.prep import SequentialPrepMixin from cle.cle.utils import segment_axis, tolist, totuple from iamondb_utils import fetch_iamondb class IAMOnDB(TemporalSeries, SequentialPrepMixin): """ IAMOnDB dataset batch provider Parameters ---------- .. todo:: """ def __init__(self, prep='none', cond=False, X_mean=None, X_std=None, bias=None, **kwargs): self.prep = prep self.cond = cond self.X_mean = X_mean self.X_std = X_std self.bias = bias super(IAMOnDB, self).__init__(**kwargs) def load(self, data_path): if self.name == "train": X, y, _, _ = fetch_iamondb(data_path) print("train") print(len(X)) print(len(y)) elif self.name == "valid": _, _, X, y = fetch_iamondb(data_path) print("valid") print(len(X)) print(len(y)) raw_X = X raw_X0 = [] offset = True raw_new_X = [] for item in raw_X: if offset: raw_X0.append(item[1:, 0]) raw_new_X.append(item[1:, 1:] - item[:-1, 1:]) else: raw_X0.append(item[:, 0]) raw_new_X.append(item[:, 1:]) raw_new_X, self.X_mean, self.X_std = self.global_normalize(raw_new_X, self.X_mean, self.X_std) new_x = [] for n in range(raw_new_X.shape[0]): new_x.append(np.concatenate((raw_X0[n][:, None], raw_new_X[n]), axis=-1).astype('float32')) new_x = np.array(new_x) if self.prep == 'none': X = np.array(raw_X) if self.prep == 'normalize': X = new_x print X[0].shape elif self.prep == 'standardize': X, self.X_max, self.X_min = self.standardize(raw_X) self.labels = [np.array(y)] return [X] def theano_vars(self): if self.cond: return [T.ftensor3('x'), T.fmatrix('mask'), T.ftensor3('y'), T.fmatrix('label_mask')] else: return [T.ftensor3('x'), T.fmatrix('mask')] def theano_test_vars(self): return [T.ftensor3('y'), T.fmatrix('label_mask')] def slices(self, start, end): batches = [mat[start:end] for mat in self.data] label_batches = [mat[start:end] for mat in self.labels] mask = self.create_mask(batches[0].swapaxes(0, 1)) batches = [self.zero_pad(batch) for batch in batches] label_mask = self.create_mask(label_batches[0].swapaxes(0, 1)) label_batches = [self.zero_pad(batch) for batch in label_batches] if self.cond: return totuple([batches[0], mask, label_batches[0], label_mask]) else: return totuple([batches[0], mask]) def generate_index(self, X): maxlen = np.array([len(x) for x in X]).max() idx = np.arange(maxlen) return idx if __name__ == "__main__": data_path = '/data/lisatmp3/iamondb/' iamondb = IAMOnDB(name='train', prep='normalize', cond=False, path=data_path) batch = iamondb.slices(start=0, end=10826) X = iamondb.data[0] sub_X = X for item in X: max_x = np.max(item[:,1]) max_y = np.max(item[:,2]) min_x = np.min(item[:,1]) min_y = np.min(item[:,2]) print np.max(max_x) print np.max(max_y) print np.min(min_x) print np.min(min_y) ipdb.set_trace()
gpl-2.0
akaraspt/deepsleepnet
tensorlayer/utils.py
1
21547
#! /usr/bin/python # -*- coding: utf8 -*- import tensorflow as tf import tensorlayer as tl from . import iterate import numpy as np import time import math import random def fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_size=100, n_epoch=100, print_freq=5, X_val=None, y_val=None, eval_train=True, tensorboard=False, tensorboard_epoch_freq=5, tensorboard_weight_histograms=True, tensorboard_graph_vis=True): """Traing a given non time-series network by the given cost function, training data, batch_size, n_epoch etc. Parameters ---------- sess : TensorFlow session sess = tf.InteractiveSession() network : a TensorLayer layer the network will be trained train_op : a TensorFlow optimizer like tf.compat.v1.train.AdamOptimizer X_train : numpy array the input of training data y_train : numpy array the target of training data x : placeholder for inputs y_ : placeholder for targets acc : the TensorFlow expression of accuracy (or other metric) or None if None, would not display the metric batch_size : int batch size for training and evaluating n_epoch : int the number of training epochs print_freq : int display the training information every ``print_freq`` epochs X_val : numpy array or None the input of validation data y_val : numpy array or None the target of validation data eval_train : boolean if X_val and y_val are not None, it refects whether to evaluate the training data tensorboard : boolean if True summary data will be stored to the log/ direcory for visualization with tensorboard. See also detailed tensorboard_X settings for specific configurations of features. (default False) Also runs tl.layers.initialize_global_variables(sess) internally in fit() to setup the summary nodes, see Note: tensorboard_epoch_freq : int how many epochs between storing tensorboard checkpoint for visualization to log/ directory (default 5) tensorboard_weight_histograms : boolean if True updates tensorboard data in the logs/ directory for visulaization of the weight histograms every tensorboard_epoch_freq epoch (default True) tensorboard_graph_vis : boolean if True stores the graph in the tensorboard summaries saved to log/ (default True) Examples -------- >>> see tutorial_mnist_simple.py >>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_, ... acc=acc, batch_size=500, n_epoch=200, print_freq=5, ... X_val=X_val, y_val=y_val, eval_train=False) >>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_, ... acc=acc, batch_size=500, n_epoch=200, print_freq=5, ... X_val=X_val, y_val=y_val, eval_train=False, ... tensorboard=True, tensorboard_weight_histograms=True, tensorboard_graph_vis=True) Note -------- If tensorboard=True, the global_variables_initializer will be run inside the fit function in order to initalize the automatically generated summary nodes used for tensorboard visualization, thus tf.compat.v1.global_variables_initializer().run() before the fit() call will be undefined. 
""" assert X_train.shape[0] >= batch_size, "Number of training examples should be bigger than the batch size" if(tensorboard): print("Setting up tensorboard ...") #Set up tensorboard summaries and saver tl.files.exists_or_mkdir('logs/') #Only write summaries for more recent TensorFlow versions if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'): if tensorboard_graph_vis: train_writer = tf.compat.v1.summary.FileWriter('logs/train',sess.graph) val_writer = tf.compat.v1.summary.FileWriter('logs/validation',sess.graph) else: train_writer = tf.compat.v1.summary.FileWriter('logs/train') val_writer = tf.compat.v1.summary.FileWriter('logs/validation') #Set up summary nodes if(tensorboard_weight_histograms): for param in network.all_params: if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'): print(('Param name ', param.name)) tf.summary.histogram(param.name, param) if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'): tf.summary.scalar('cost', cost) merged = tf.summary.merge_all() #Initalize all variables and summaries tl.layers.initialize_global_variables(sess) print("Finished! use $tensorboard --logdir=logs/ to start server") print("Start training the network ...") start_time_begin = time.time() tensorboard_train_index, tensorboard_val_index = 0, 0 for epoch in range(n_epoch): start_time = time.time() loss_ep = 0; n_step = 0 for X_train_a, y_train_a in iterate.minibatches(X_train, y_train, batch_size, shuffle=True): feed_dict = {x: X_train_a, y_: y_train_a} feed_dict.update( network.all_drop ) # enable noise layers loss, _ = sess.run([cost, train_op], feed_dict=feed_dict) loss_ep += loss n_step += 1 loss_ep = loss_ep/ n_step if tensorboard and hasattr(tf, 'summary'): if epoch+1 == 1 or (epoch+1) % tensorboard_epoch_freq == 0: for X_train_a, y_train_a in iterate.minibatches( X_train, y_train, batch_size, shuffle=True): dp_dict = dict_to_one( network.all_drop ) # disable noise layers feed_dict = {x: X_train_a, y_: y_train_a} feed_dict.update(dp_dict) result = sess.run(merged, feed_dict=feed_dict) train_writer.add_summary(result, tensorboard_train_index) tensorboard_train_index += 1 for X_val_a, y_val_a in iterate.minibatches( X_val, y_val, batch_size, shuffle=True): dp_dict = dict_to_one( network.all_drop ) # disable noise layers feed_dict = {x: X_val_a, y_: y_val_a} feed_dict.update(dp_dict) result = sess.run(merged, feed_dict=feed_dict) val_writer.add_summary(result, tensorboard_val_index) tensorboard_val_index += 1 if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: if (X_val is not None) and (y_val is not None): print(("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))) if eval_train is True: train_loss, train_acc, n_batch = 0, 0, 0 for X_train_a, y_train_a in iterate.minibatches( X_train, y_train, batch_size, shuffle=True): dp_dict = dict_to_one( network.all_drop ) # disable noise layers feed_dict = {x: X_train_a, y_: y_train_a} feed_dict.update(dp_dict) if acc is not None: err, ac = sess.run([cost, acc], feed_dict=feed_dict) train_acc += ac else: err = sess.run(cost, feed_dict=feed_dict) train_loss += err; n_batch += 1 print((" train loss: %f" % (train_loss/ n_batch))) if acc is not None: print((" train acc: %f" % (train_acc/ n_batch))) val_loss, val_acc, n_batch = 0, 0, 0 for X_val_a, y_val_a in iterate.minibatches( X_val, y_val, batch_size, shuffle=True): dp_dict = dict_to_one( network.all_drop ) # disable noise layers feed_dict = {x: X_val_a, y_: y_val_a} feed_dict.update(dp_dict) if acc is not None: err, ac = sess.run([cost, acc], 
feed_dict=feed_dict) val_acc += ac else: err = sess.run(cost, feed_dict=feed_dict) val_loss += err; n_batch += 1 print((" val loss: %f" % (val_loss/ n_batch))) if acc is not None: print((" val acc: %f" % (val_acc/ n_batch))) else: print(("Epoch %d of %d took %fs, loss %f" % (epoch + 1, n_epoch, time.time() - start_time, loss_ep))) print(("Total training time: %fs" % (time.time() - start_time_begin))) def test(sess, network, acc, X_test, y_test, x, y_, batch_size, cost=None): """ Test a given non time-series network by the given test data and metric. Parameters ---------- sess : TensorFlow session sess = tf.InteractiveSession() network : a TensorLayer layer the network will be trained acc : the TensorFlow expression of accuracy (or other metric) or None if None, would not display the metric X_test : numpy array the input of test data y_test : numpy array the target of test data x : placeholder for inputs y_ : placeholder for targets batch_size : int or None batch size for testing, when dataset is large, we should use minibatche for testing. when dataset is small, we can set it to None. cost : the TensorFlow expression of cost or None if None, would not display the cost Examples -------- >>> see tutorial_mnist_simple.py >>> tl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size=None, cost=cost) """ print('Start testing the network ...') if batch_size is None: dp_dict = dict_to_one( network.all_drop ) feed_dict = {x: X_test, y_: y_test} feed_dict.update(dp_dict) if cost is not None: print((" test loss: %f" % sess.run(cost, feed_dict=feed_dict))) print((" test acc: %f" % sess.run(acc, feed_dict=feed_dict))) # print(" test acc: %f" % np.mean(y_test == sess.run(y_op, # feed_dict=feed_dict))) else: test_loss, test_acc, n_batch = 0, 0, 0 for X_test_a, y_test_a in iterate.minibatches( X_test, y_test, batch_size, shuffle=True): dp_dict = dict_to_one( network.all_drop ) # disable noise layers feed_dict = {x: X_test_a, y_: y_test_a} feed_dict.update(dp_dict) if cost is not None: err, ac = sess.run([cost, acc], feed_dict=feed_dict) test_loss += err else: ac = sess.run(acc, feed_dict=feed_dict) test_acc += ac; n_batch += 1 if cost is not None: print((" test loss: %f" % (test_loss/ n_batch))) print((" test acc: %f" % (test_acc/ n_batch))) def predict(sess, network, X, x, y_op): """ Return the predict results of given non time-series network. Parameters ---------- sess : TensorFlow session sess = tf.InteractiveSession() network : a TensorLayer layer the network will be trained X : numpy array the input x : placeholder for inputs y_op : placeholder the argmax expression of softmax outputs Examples -------- >>> see tutorial_mnist_simple.py >>> y = network.outputs >>> y_op = tf.argmax(tf.nn.softmax(y), 1) >>> print(tl.utils.predict(sess, network, X_test, x, y_op)) """ dp_dict = dict_to_one( network.all_drop ) # disable noise layers feed_dict = {x: X,} feed_dict.update(dp_dict) return sess.run(y_op, feed_dict=feed_dict) ## Evaluation def evaluation(y_test=None, y_predict=None, n_classes=None): """ Input the predicted results, targets results and the number of class, return the confusion matrix, F1-score of each class, accuracy and macro F1-score. 
Parameters ---------- y_test : numpy.array or list target results y_predict : numpy.array or list predicted results n_classes : int number of classes Examples -------- >>> c_mat, f1, acc, f1_macro = evaluation(y_test, y_predict, n_classes) """ from sklearn.metrics import confusion_matrix, f1_score, accuracy_score c_mat = confusion_matrix(y_test, y_predict, labels = [x for x in range(n_classes)]) f1 = f1_score(y_test, y_predict, average = None, labels = [x for x in range(n_classes)]) f1_macro = f1_score(y_test, y_predict, average='macro') acc = accuracy_score(y_test, y_predict) print(('confusion matrix: \n',c_mat)) print(('f1-score:',f1)) print(('f1-score(macro):',f1_macro)) # same output with > f1_score(y_true, y_pred, average='macro') print(('accuracy-score:', acc)) return c_mat, f1, acc, f1_macro def dict_to_one(dp_dict={}): """ Input a dictionary, return a dictionary that all items are set to one, use for disable dropout, dropconnect layer and so on. Parameters ---------- dp_dict : dictionary keeping probabilities Examples -------- >>> dp_dict = dict_to_one( network.all_drop ) >>> dp_dict = dict_to_one( network.all_drop ) >>> feed_dict.update(dp_dict) """ return {x: 1 for x in dp_dict} def flatten_list(list_of_list=[[],[]]): """ Input a list of list, return a list that all items are in a list. Parameters ---------- list_of_list : a list of list Examples -------- >>> tl.utils.flatten_list([[1, 2, 3],[4, 5],[6]]) ... [1, 2, 3, 4, 5, 6] """ return sum(list_of_list, []) def class_balancing_oversample(X_train=None, y_train=None, printable=True): """Input the features and labels, return the features and labels after oversampling. Parameters ---------- X_train : numpy.array Features, each row is an example y_train : numpy.array Labels Examples -------- - One X >>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True) - Two X >>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False) >>> X1 = X[:, 0:5] >>> X2 = X[:, 5:] """ # ======== Classes balancing if printable: print("Classes balancing for training examples...") from collections import Counter c = Counter(y_train) if printable: print(('the occurrence number of each stage: %s' % c.most_common())) print(('the least stage is Label %s have %s instances' % c.most_common()[-1])) print(('the most stage is Label %s have %s instances' % c.most_common(1)[0])) most_num = c.most_common(1)[0][1] if printable: print(('most num is %d, all classes tend to be this num' % most_num)) locations = {} number = {} for lab, num in c.most_common(): # find the index from y_train number[lab] = num locations[lab] = np.where(np.array(y_train)==lab)[0] if printable: print('convert list(np.array) to dict format') X = {} # convert list to dict for lab, num in list(number.items()): X[lab] = X_train[locations[lab]] # oversampling if printable: print('start oversampling') for key in X: temp = X[key] while True: if len(X[key]) >= most_num: break X[key] = np.vstack((X[key], temp)) if printable: print(('first features of label 0 >', len(X[0][0]))) print('the occurrence num of each stage after oversampling') for key in X: print((key, len(X[key]))) if printable: print('make each stage have same num of instances') for key in X: X[key] = X[key][0:most_num,:] print((key, len(X[key]))) # convert dict to list if printable: print('convert from dict to list format') y_train = [] X_train = np.empty(shape=(0,len(X[0][0]))) for key in X: X_train = np.vstack( (X_train, X[key] ) ) y_train.extend([key for i in 
range(len(X[key]))]) # print(len(X_train), len(y_train)) c = Counter(y_train) if printable: print(('the occurrence number of each stage after oversampling: %s' % c.most_common())) # ================ End of Classes balancing return X_train, y_train ## Random def get_random_int(min=0, max=10, number=5, seed=None): """Return a list of random integer by the given range and quantity. Examples --------- >>> r = get_random_int(min=0, max=10, number=5) ... [10, 2, 3, 3, 7] """ rnd = random.Random() if seed: rnd = random.Random(seed) # return [random.randint(min,max) for p in range(0, number)] return [rnd.randint(min,max) for p in range(0, number)] # # def class_balancing_sequence_4D(X_train, y_train, sequence_length, model='downsampling' ,printable=True): # ''' 输入、输出都是sequence format # oversampling or downsampling # ''' # n_features = X_train.shape[2] # # ======== Classes balancing for sequence # if printable: # print("Classes balancing for 4D sequence training examples...") # from collections import Counter # c = Counter(y_train) # Counter({2: 454, 4: 267, 3: 124, 1: 57, 0: 48}) # if printable: # print('the occurrence number of each stage: %s' % c.most_common()) # print('the least Label %s have %s instances' % c.most_common()[-1]) # print('the most Label %s have %s instances' % c.most_common(1)[0]) # # print(c.most_common()) # [(2, 454), (4, 267), (3, 124), (1, 57), (0, 48)] # most_num = c.most_common(1)[0][1] # less_num = c.most_common()[-1][1] # # locations = {} # number = {} # for lab, num in c.most_common(): # number[lab] = num # locations[lab] = np.where(np.array(y_train)==lab)[0] # # print(locations) # # print(number) # if printable: # print(' convert list to dict') # X = {} # convert list to dict # ### a sequence # for lab, _ in number.items(): # X[lab] = np.empty(shape=(0,1,n_features,1)) # 4D # for lab, _ in number.items(): # #X[lab] = X_train[locations[lab] # for l in locations[lab]: # X[lab] = np.vstack((X[lab], X_train[l*sequence_length : (l+1)*(sequence_length)])) # # X[lab] = X_train[locations[lab]*sequence_length : locations[lab]*(sequence_length+1)] # a sequence # # print(X) # # if model=='oversampling': # if printable: # print(' oversampling -- most num is %d, all classes tend to be this num\nshuffle applied' % most_num) # for key in X: # temp = X[key] # while True: # if len(X[key]) >= most_num * sequence_length: # sequence # break # X[key] = np.vstack((X[key], temp)) # # print(key, len(X[key])) # if printable: # print(' make each stage have same num of instances') # for key in X: # X[key] = X[key][0:most_num*sequence_length,:] # sequence # if printable: # print(key, len(X[key])) # elif model=='downsampling': # import random # if printable: # print(' downsampling -- less num is %d, all classes tend to be this num by randomly choice without replacement\nshuffle applied' % less_num) # for key in X: # # print(key, len(X[key]))#, len(X[key])/sequence_length) # s_idx = [ i for i in range(int(len(X[key])/sequence_length))] # s_idx = np.asarray(s_idx)*sequence_length # start index of sequnce in X[key] # # print('s_idx',s_idx) # r_idx = np.random.choice(s_idx, less_num, replace=False) # random choice less_num of s_idx # # print('r_idx',r_idx) # temp = X[key] # X[key] = np.empty(shape=(0,1,n_features,1)) # 4D # for idx in r_idx: # X[key] = np.vstack((X[key], temp[idx:idx+sequence_length])) # # print(key, X[key]) # # np.random.choice(l, len(l), replace=False) # else: # raise Exception(' model should be oversampling or downsampling') # # # convert dict to list # if printable: # print(' 
convert dict to list') # y_train = [] # # X_train = np.empty(shape=(0,len(X[0][0]))) # # X_train = np.empty(shape=(0,len(X[1][0]))) # 2D # X_train = np.empty(shape=(0,1,n_features,1)) # 4D # l_key = list(X.keys()) # shuffle # random.shuffle(l_key) # shuffle # # for key in X: # no shuffle # for key in l_key: # shuffle # X_train = np.vstack( (X_train, X[key] ) ) # # print(len(X[key])) # y_train.extend([key for i in range(int(len(X[key])/sequence_length))]) # # print(X_train,y_train, type(X_train), type(y_train)) # # ================ End of Classes balancing for sequence # # print(X_train.shape, len(y_train)) # return X_train, np.asarray(y_train)
apache-2.0
dsm054/pandas
pandas/tests/arrays/test_integer.py
1
21799
# -*- coding: utf-8 -*- import numpy as np import pytest from pandas.core.dtypes.generic import ABCIndexClass import pandas as pd from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar from pandas.core.arrays import IntegerArray, integer_array from pandas.core.arrays.integer import ( Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype) from pandas.tests.extension.base import BaseOpsUtil import pandas.util.testing as tm def make_data(): return (list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]) @pytest.fixture(params=[Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype]) def dtype(request): return request.param() @pytest.fixture def data(dtype): return integer_array(make_data(), dtype=dtype) @pytest.fixture def data_missing(dtype): return integer_array([np.nan, 1], dtype=dtype) @pytest.fixture(params=['data', 'data_missing']) def all_data(request, data, data_missing): """Parametrized fixture giving 'data' and 'data_missing'""" if request.param == 'data': return data elif request.param == 'data_missing': return data_missing def test_dtypes(dtype): # smoke tests on auto dtype construction if dtype.is_signed_integer: assert np.dtype(dtype.type).kind == 'i' else: assert np.dtype(dtype.type).kind == 'u' assert dtype.name is not None class TestInterface(object): def test_repr_array(self, data): result = repr(data) # not long assert '...' not in result assert 'dtype=' in result assert 'IntegerArray' in result def test_repr_array_long(self, data): # some arrays may be able to assert a ... in the repr with pd.option_context('display.max_seq_items', 1): result = repr(data) assert '...' in result assert 'length' in result class TestConstructors(object): def test_from_dtype_from_float(self, data): # construct from our dtype & string dtype dtype = data.dtype # from float expected = pd.Series(data) result = pd.Series(np.array(data).astype('float'), dtype=str(dtype)) tm.assert_series_equal(result, expected) # from int / list expected = pd.Series(data) result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) tm.assert_series_equal(result, expected) # from int / array expected = pd.Series(data).dropna().reset_index(drop=True) dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) result = pd.Series(dropped, dtype=str(dtype)) tm.assert_series_equal(result, expected) class TestArithmeticOps(BaseOpsUtil): def _check_divmod_op(self, s, op, other, exc=None): super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) def _check_op(self, s, op_name, other, exc=None): op = self.get_op_from_name(op_name) result = op(s, other) # compute expected mask = s.isna() # if s is a DataFrame, squeeze to a Series # for comparison if isinstance(s, pd.DataFrame): result = result.squeeze() s = s.squeeze() mask = mask.squeeze() # other array is an Integer if isinstance(other, IntegerArray): omask = getattr(other, 'mask', None) mask = getattr(other, 'data', other) if omask is not None: mask |= omask # 1 ** na is na, so need to unmask those if op_name == '__pow__': mask = np.where(s == 1, False, mask) elif op_name == '__rpow__': mask = np.where(other == 1, False, mask) # float result type or float op if ((is_float_dtype(other) or is_float(other) or op_name in ['__rtruediv__', '__truediv__', '__rdiv__', '__div__'])): rs = s.astype('float') expected = op(rs, other) self._check_op_float(result, expected, mask, s, op_name, other) # integer result type else: rs = 
pd.Series(s.values._data) expected = op(rs, other) self._check_op_integer(result, expected, mask, s, op_name, other) def _check_op_float(self, result, expected, mask, s, op_name, other): # check comparisions that are resulting in float dtypes expected[mask] = np.nan tm.assert_series_equal(result, expected) def _check_op_integer(self, result, expected, mask, s, op_name, other): # check comparisions that are resulting in integer dtypes # to compare properly, we convert the expected # to float, mask to nans and convert infs # if we have uints then we process as uints # then conert to float # and we ultimately want to create a IntArray # for comparisons fill_value = 0 # mod/rmod turn floating 0 into NaN while # integer works as expected (no nan) if op_name in ['__mod__', '__rmod__']: if is_scalar(other): if other == 0: expected[s.values == 0] = 0 else: expected = expected.fillna(0) else: expected[(s.values == 0) & ((expected == 0) | expected.isna())] = 0 try: expected[(expected == np.inf) | (expected == -np.inf)] = fill_value original = expected expected = expected.astype(s.dtype) except ValueError: expected = expected.astype(float) expected[(expected == np.inf) | (expected == -np.inf)] = fill_value original = expected expected = expected.astype(s.dtype) expected[mask] = np.nan # assert that the expected astype is ok # (skip for unsigned as they have wrap around) if not s.dtype.is_unsigned_integer: original = pd.Series(original) # we need to fill with 0's to emulate what an astype('int') does # (truncation) for certain ops if op_name in ['__rtruediv__', '__rdiv__']: mask |= original.isna() original = original.fillna(0).astype('int') original = original.astype('float') original[mask] = np.nan tm.assert_series_equal(original, expected.astype('float')) # assert our expected result tm.assert_series_equal(result, expected) def test_arith_integer_array(self, data, all_arithmetic_operators): # we operate with a rhs of an integer array op = all_arithmetic_operators s = pd.Series(data) rhs = pd.Series([1] * len(data), dtype=data.dtype) rhs.iloc[-1] = np.nan self._check_op(s, op, rhs) def test_arith_series_with_scalar(self, data, all_arithmetic_operators): # scalar op = all_arithmetic_operators s = pd.Series(data) self._check_op(s, op, 1, exc=TypeError) def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): # frame & scalar op = all_arithmetic_operators df = pd.DataFrame({'A': data}) self._check_op(df, op, 1, exc=TypeError) def test_arith_series_with_array(self, data, all_arithmetic_operators): # ndarray & other series op = all_arithmetic_operators s = pd.Series(data) other = np.ones(len(s), dtype=s.dtype.type) self._check_op(s, op, other, exc=TypeError) def test_arith_coerce_scalar(self, data, all_arithmetic_operators): op = all_arithmetic_operators s = pd.Series(data) other = 0.01 self._check_op(s, op, other) @pytest.mark.parametrize("other", [1., 1.0, np.array(1.), np.array([1.])]) def test_arithmetic_conversion(self, all_arithmetic_operators, other): # if we have a float operand we should have a float result # if that is equal to an integer op = self.get_op_from_name(all_arithmetic_operators) s = pd.Series([1, 2, 3], dtype='Int64') result = op(s, other) assert result.dtype is np.dtype('float') @pytest.mark.parametrize("other", [0, 0.5]) def test_arith_zero_dim_ndarray(self, other): arr = integer_array([1, None, 2]) result = arr + np.array(other) expected = arr + other tm.assert_equal(result, expected) def test_error(self, data, all_arithmetic_operators): # invalid ops op = 
all_arithmetic_operators s = pd.Series(data) ops = getattr(s, op) opa = getattr(data, op) # invalid scalars with pytest.raises(TypeError): ops('foo') with pytest.raises(TypeError): ops(pd.Timestamp('20180101')) # invalid array-likes with pytest.raises(TypeError): ops(pd.Series('foo', index=s.index)) if op != '__rpow__': # TODO(extension) # rpow with a datetimelike coerces the integer array incorrectly with pytest.raises(TypeError): ops(pd.Series(pd.date_range('20180101', periods=len(s)))) # 2d with pytest.raises(NotImplementedError): opa(pd.DataFrame({'A': s})) with pytest.raises(NotImplementedError): opa(np.arange(len(s)).reshape(-1, len(s))) def test_pow(self): # https://github.com/pandas-dev/pandas/issues/22022 a = integer_array([1, np.nan, np.nan, 1]) b = integer_array([1, np.nan, 1, np.nan]) result = a ** b expected = pd.core.arrays.integer_array([1, np.nan, np.nan, 1]) tm.assert_extension_array_equal(result, expected) def test_rpow_one_to_na(self): # https://github.com/pandas-dev/pandas/issues/22022 arr = integer_array([np.nan, np.nan]) result = np.array([1.0, 2.0]) ** arr expected = np.array([1.0, np.nan]) tm.assert_numpy_array_equal(result, expected) class TestComparisonOps(BaseOpsUtil): def _compare_other(self, data, op_name, other): op = self.get_op_from_name(op_name) # array result = pd.Series(op(data, other)) expected = pd.Series(op(data._data, other)) # fill the nan locations expected[data._mask] = True if op_name == '__ne__' else False tm.assert_series_equal(result, expected) # series s = pd.Series(data) result = op(s, other) expected = pd.Series(data._data) expected = op(expected, other) # fill the nan locations expected[data._mask] = True if op_name == '__ne__' else False tm.assert_series_equal(result, expected) def test_compare_scalar(self, data, all_compare_operators): op_name = all_compare_operators self._compare_other(data, op_name, 0) def test_compare_array(self, data, all_compare_operators): op_name = all_compare_operators other = pd.Series([0] * len(data)) self._compare_other(data, op_name, other) class TestCasting(object): pass @pytest.mark.parametrize('dropna', [True, False]) def test_construct_index(self, all_data, dropna): # ensure that we do not coerce to Float64Index, rather # keep as Index all_data = all_data[:10] if dropna: other = np.array(all_data[~all_data.isna()]) else: other = all_data result = pd.Index(integer_array(other, dtype=all_data.dtype)) expected = pd.Index(other, dtype=object) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('dropna', [True, False]) def test_astype_index(self, all_data, dropna): # as an int/uint index to Index all_data = all_data[:10] if dropna: other = all_data[~all_data.isna()] else: other = all_data dtype = all_data.dtype idx = pd.Index(np.array(other)) assert isinstance(idx, ABCIndexClass) result = idx.astype(dtype) expected = idx.astype(object).astype(dtype) tm.assert_index_equal(result, expected) def test_astype(self, all_data): all_data = all_data[:10] ints = all_data[~all_data.isna()] mixed = all_data dtype = Int8Dtype() # coerce to same type - ints s = pd.Series(ints) result = s.astype(all_data.dtype) expected = pd.Series(ints) tm.assert_series_equal(result, expected) # coerce to same other - ints s = pd.Series(ints) result = s.astype(dtype) expected = pd.Series(ints, dtype=dtype) tm.assert_series_equal(result, expected) # coerce to same numpy_dtype - ints s = pd.Series(ints) result = s.astype(all_data.dtype.numpy_dtype) expected = pd.Series(ints._data.astype( all_data.dtype.numpy_dtype)) 
tm.assert_series_equal(result, expected) # coerce to same type - mixed s = pd.Series(mixed) result = s.astype(all_data.dtype) expected = pd.Series(mixed) tm.assert_series_equal(result, expected) # coerce to same other - mixed s = pd.Series(mixed) result = s.astype(dtype) expected = pd.Series(mixed, dtype=dtype) tm.assert_series_equal(result, expected) # coerce to same numpy_dtype - mixed s = pd.Series(mixed) with pytest.raises(ValueError): s.astype(all_data.dtype.numpy_dtype) # coerce to object s = pd.Series(mixed) result = s.astype('object') expected = pd.Series(np.asarray(mixed)) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('dtype', [Int8Dtype(), 'Int8', UInt32Dtype(), 'UInt32']) def test_astype_specific_casting(self, dtype): s = pd.Series([1, 2, 3], dtype='Int64') result = s.astype(dtype) expected = pd.Series([1, 2, 3], dtype=dtype) tm.assert_series_equal(result, expected) s = pd.Series([1, 2, 3, None], dtype='Int64') result = s.astype(dtype) expected = pd.Series([1, 2, 3, None], dtype=dtype) tm.assert_series_equal(result, expected) def test_construct_cast_invalid(self, dtype): msg = "cannot safely" arr = [1.2, 2.3, 3.7] with pytest.raises(TypeError, match=msg): integer_array(arr, dtype=dtype) with pytest.raises(TypeError, match=msg): pd.Series(arr).astype(dtype) arr = [1.2, 2.3, 3.7, np.nan] with pytest.raises(TypeError, match=msg): integer_array(arr, dtype=dtype) with pytest.raises(TypeError, match=msg): pd.Series(arr).astype(dtype) def test_frame_repr(data_missing): df = pd.DataFrame({'A': data_missing}) result = repr(df) expected = ' A\n0 NaN\n1 1' assert result == expected def test_conversions(data_missing): # astype to object series df = pd.DataFrame({'A': data_missing}) result = df['A'].astype('object') expected = pd.Series(np.array([np.nan, 1], dtype=object), name='A') tm.assert_series_equal(result, expected) # convert to object ndarray # we assert that we are exactly equal # including type conversions of scalars result = df['A'].astype('object').values expected = np.array([np.nan, 1], dtype=object) tm.assert_numpy_array_equal(result, expected) for r, e in zip(result, expected): if pd.isnull(r): assert pd.isnull(e) elif is_integer(r): # PY2 can be int or long assert r == e assert is_integer(e) else: assert r == e assert type(r) == type(e) def test_integer_array_constructor(): values = np.array([1, 2, 3, 4], dtype='int64') mask = np.array([False, False, False, True], dtype='bool') result = IntegerArray(values, mask) expected = integer_array([1, 2, 3, np.nan], dtype='int64') tm.assert_extension_array_equal(result, expected) with pytest.raises(TypeError): IntegerArray(values.tolist(), mask) with pytest.raises(TypeError): IntegerArray(values, mask.tolist()) with pytest.raises(TypeError): IntegerArray(values.astype(float), mask) with pytest.raises(TypeError): IntegerArray(values) @pytest.mark.parametrize('a, b', [ ([1, None], [1, np.nan]), ([None], [np.nan]), ([None, np.nan], [np.nan, np.nan]), ([np.nan, np.nan], [np.nan, np.nan]), ]) def test_integer_array_constructor_none_is_nan(a, b): result = integer_array(a) expected = integer_array(b) tm.assert_extension_array_equal(result, expected) def test_integer_array_constructor_copy(): values = np.array([1, 2, 3, 4], dtype='int64') mask = np.array([False, False, False, True], dtype='bool') result = IntegerArray(values, mask) assert result._data is values assert result._mask is mask result = IntegerArray(values, mask, copy=True) assert result._data is not values assert result._mask is not mask 
@pytest.mark.parametrize( 'values', [ ['foo', 'bar'], ['1', '2'], 'foo', 1, 1.0, pd.date_range('20130101', periods=2), np.array(['foo']), [[1, 2], [3, 4]], [np.nan, {'a': 1}]]) def test_to_integer_array_error(values): # error in converting existing arrays to IntegerArrays with pytest.raises(TypeError): integer_array(values) def test_to_integer_array_inferred_dtype(): # if values has dtype -> respect it result = integer_array(np.array([1, 2], dtype='int8')) assert result.dtype == Int8Dtype() result = integer_array(np.array([1, 2], dtype='int32')) assert result.dtype == Int32Dtype() # if values have no dtype -> always int64 result = integer_array([1, 2]) assert result.dtype == Int64Dtype() def test_to_integer_array_dtype_keyword(): result = integer_array([1, 2], dtype='int8') assert result.dtype == Int8Dtype() # if values has dtype -> override it result = integer_array(np.array([1, 2], dtype='int8'), dtype='int32') assert result.dtype == Int32Dtype() def test_to_integer_array_float(): result = integer_array([1., 2.]) expected = integer_array([1, 2]) tm.assert_extension_array_equal(result, expected) with pytest.raises(TypeError, match="cannot safely cast non-equivalent"): integer_array([1.5, 2.]) # for float dtypes, the itemsize is not preserved result = integer_array(np.array([1., 2.], dtype='float32')) assert result.dtype == Int64Dtype() @pytest.mark.parametrize( 'values, to_dtype, result_dtype', [ (np.array([1], dtype='int64'), None, Int64Dtype), (np.array([1, np.nan]), None, Int64Dtype), (np.array([1, np.nan]), 'int8', Int8Dtype)]) def test_to_integer_array(values, to_dtype, result_dtype): # convert existing arrays to IntegerArrays result = integer_array(values, dtype=to_dtype) assert result.dtype == result_dtype() expected = integer_array(values, dtype=result_dtype()) tm.assert_extension_array_equal(result, expected) def test_cross_type_arithmetic(): df = pd.DataFrame({'A': pd.Series([1, 2, np.nan], dtype='Int64'), 'B': pd.Series([1, np.nan, 3], dtype='UInt8'), 'C': [1, 2, 3]}) result = df.A + df.C expected = pd.Series([2, 4, np.nan], dtype='Int64') tm.assert_series_equal(result, expected) result = (df.A + df.C) * 3 == 12 expected = pd.Series([False, True, False]) tm.assert_series_equal(result, expected) result = df.A + df.B expected = pd.Series([2, np.nan, np.nan], dtype='Int64') tm.assert_series_equal(result, expected) @pytest.mark.parametrize('op', ['sum', 'min', 'max', 'prod']) def test_preserve_dtypes(op): # TODO(#22346): preserve Int64 dtype # for ops that enable (mean would actually work here # but generally it is a float return value) df = pd.DataFrame({ "A": ['a', 'b', 'b'], "B": [1, None, 3], "C": integer_array([1, None, 3], dtype='Int64'), }) # op result = getattr(df.C, op)() assert isinstance(result, int) # groupby result = getattr(df.groupby("A"), op)() expected = pd.DataFrame({ "B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64") }, index=pd.Index(['a', 'b'], name='A')) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize('op', ['mean']) def test_reduce_to_float(op): # some reduce ops always return float, even if the result # is a rounded number df = pd.DataFrame({ "A": ['a', 'b', 'b'], "B": [1, None, 3], "C": integer_array([1, None, 3], dtype='Int64'), }) # op result = getattr(df.C, op)() assert isinstance(result, float) # groupby result = getattr(df.groupby("A"), op)() expected = pd.DataFrame({ "B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64") }, index=pd.Index(['a', 'b'], name='A')) tm.assert_frame_equal(result, expected) 
def test_astype_nansafe(): # see gh-22343 arr = integer_array([np.nan, 1, 2], dtype="Int8") msg = "cannot convert float NaN to integer" with pytest.raises(ValueError, match=msg): arr.astype('uint32') # TODO(jreback) - these need testing / are broken # shift # set_index (destroys type)
bsd-3-clause
jjx02230808/project0223
examples/bicluster/plot_spectral_coclustering.py
276
1736
""" ============================================== A demo of the Spectral Co-Clustering algorithm ============================================== This example demonstrates how to generate a dataset and bicluster it using the the Spectral Co-Clustering algorithm. The dataset is generated using the ``make_biclusters`` function, which creates a matrix of small values and implants bicluster with large values. The rows and columns are then shuffled and passed to the Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to make biclusters contiguous shows how accurately the algorithm found the biclusters. """ print(__doc__) # Author: Kemal Eren <kemal@kemaleren.com> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_biclusters from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralCoclustering from sklearn.metrics import consensus_score data, rows, columns = make_biclusters( shape=(300, 300), n_clusters=5, noise=5, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralCoclustering(n_clusters=5, random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.3f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.show()
bsd-3-clause
magicDGS/gatk
src/main/python/org/broadinstitute/hellbender/gcnvkernel/io/io_metadata.py
3
8160
import csv
import logging
import numpy as np
import os
import pandas as pd
from typing import List

from . import io_commons
from . import io_consts
from .. import types
from ..structs.metadata import SampleReadDepthMetadata, SamplePloidyMetadata, SampleCoverageMetadata, \
    SampleMetadataCollection

_logger = logging.getLogger(__name__)


def write_sample_coverage_metadata(sample_metadata_collection: SampleMetadataCollection,
                                   sample_names: List[str],
                                   output_file: str):
    """Write coverage metadata for all samples in a given `SampleMetadataCollection`
    to a single .tsv file in the same order as `sample_names`.

    Args:
        sample_metadata_collection: an instance of `SampleMetadataCollection`
        sample_names: list of samples to process
        output_file: output .tsv file

    Raises:
        AssertionError: if some of the samples do not have `SampleCoverageMetadata` annotation

    Returns:
        None
    """
    assert len(sample_names) > 0
    assert sample_metadata_collection.all_samples_have_coverage_metadata(sample_names)
    contig_list = sample_metadata_collection.sample_coverage_metadata_dict[sample_names[0]].contig_list
    for sample_name in sample_names:
        assert sample_metadata_collection.sample_coverage_metadata_dict[sample_name].contig_list == contig_list

    parent_path = os.path.dirname(output_file)
    io_commons.assert_output_path_writable(parent_path)
    with open(output_file, 'w') as tsv_file:
        writer = csv.writer(tsv_file, delimiter='\t')
        header = [io_consts.sample_name_column_name] + [contig for contig in contig_list]
        writer.writerow(header)
        for sample_name in sample_names:
            sample_coverage_metadata = sample_metadata_collection.get_sample_coverage_metadata(sample_name)
            row = ([sample_name] +
                   [repr(sample_coverage_metadata.n_j[j]) for j in range(len(contig_list))])
            writer.writerow(row)


def read_sample_coverage_metadata(sample_metadata_collection: SampleMetadataCollection,
                                  input_file: str,
                                  comment=io_consts.default_comment_char,
                                  delimiter=io_consts.default_delimiter_char) -> List[str]:
    """Reads sample coverage metadata from a .tsv file and adds them to `sample_metadata_collection`.

    Args:
        sample_metadata_collection: collection to which the coverage metadata is to be added
        input_file: input sample coverage metadata .tsv file
        comment: comment character
        delimiter: delimiter character

    Returns:
        list of samples in the same order as encountered in `input_file`
    """
    coverage_metadata_pd = pd.read_csv(input_file, delimiter=delimiter, comment=comment)
    found_columns_list = [str(column) for column in coverage_metadata_pd.columns.values]
    io_commons.assert_mandatory_columns({io_consts.sample_name_column_name},
                                        set(found_columns_list),
                                        input_file)
    contig_list = found_columns_list.copy()
    contig_list.remove(io_consts.sample_name_column_name)
    num_contigs = len(contig_list)
    sample_names = []
    for tup in zip(coverage_metadata_pd[io_consts.sample_name_column_name],
                   *(coverage_metadata_pd[contig] for contig in contig_list)):
        sample_name = str(tup[0])
        n_j = np.asarray([int(tup[k + 1]) for k in range(num_contigs)], dtype=types.big_uint)
        sample_metadata_collection.add_sample_coverage_metadata(SampleCoverageMetadata(
            sample_name, n_j, contig_list))
        sample_names.append(sample_name)
    return sample_names


def update_sample_metadata_collection_from_ploidy_determination_calls(
        sample_metadata_collection: SampleMetadataCollection,
        input_calls_path: str,
        comment=io_consts.default_comment_char,
        delimiter=io_consts.default_delimiter_char):
    """Reads the output of contig ploidy determination tool and updates the given instance of
    `SampleMetadataCollection` for read depth and ploidy metadata.

    Args:
        sample_metadata_collection: the instance of `SampleMetadataCollection` to be updated
        input_calls_path: posterior output path of contig ploidy determination tool
        comment: comment character
        delimiter: delimiter character

    Returns:
        None
    """
    def get_sample_read_depth_metadata(input_path: str) -> SampleReadDepthMetadata:
        sample_read_depth_file = os.path.join(input_path, io_consts.default_sample_read_depth_tsv_filename)
        assert os.path.exists(sample_read_depth_file), \
            "Sample read depth could not be found in the contig ploidy results " \
            "located at \"{0}\"".format(input_path)
        _sample_name = io_commons.extract_sample_name_from_header(sample_read_depth_file)
        sample_read_depth_pd = pd.read_csv(sample_read_depth_file, delimiter=delimiter, comment=comment)
        io_commons.assert_mandatory_columns(
            SampleReadDepthMetadata.mandatory_tsv_columns,
            {str(column) for column in sample_read_depth_pd.columns.values},
            sample_read_depth_file)
        global_read_depth = sample_read_depth_pd[io_consts.global_read_depth_column_name].values[0]
        average_ploidy = sample_read_depth_pd[io_consts.average_ploidy_column_name].values[0]
        return SampleReadDepthMetadata(_sample_name, global_read_depth, average_ploidy)

    def get_sample_ploidy_metadata(input_path: str) -> SamplePloidyMetadata:
        sample_ploidy_file = os.path.join(input_path, io_consts.default_sample_contig_ploidy_tsv_filename)
        assert os.path.exists(sample_ploidy_file), \
            "Sample ploidy results could not be found in the contig ploidy results " \
            "located at \"{0}\"".format(input_path)
        _sample_name = io_commons.extract_sample_name_from_header(sample_ploidy_file)
        sample_ploidy_pd = pd.read_csv(sample_ploidy_file, delimiter=delimiter, comment=comment)
        io_commons.assert_mandatory_columns(
            SamplePloidyMetadata.mandatory_tsv_columns,
            {str(column) for column in sample_ploidy_pd.columns.values},
            sample_ploidy_file)
        contig_list = [str(x) for x in sample_ploidy_pd[io_consts.contig_column_name].values]
        ploidy_list = [int(x) for x in sample_ploidy_pd[io_consts.ploidy_column_name].values]
        ploidy_gq_list = [float(x) for x in sample_ploidy_pd[io_consts.ploidy_gq_column_name].values]
        return SamplePloidyMetadata(_sample_name,
                                    np.asarray(ploidy_list, dtype=types.small_uint),
                                    np.asarray(ploidy_gq_list, dtype=types.floatX),
                                    contig_list)

    _logger.info("Loading germline contig ploidy and global read depth metadata...")
    assert os.path.exists(input_calls_path) and os.path.isdir(input_calls_path), \
        "The provided path to ploidy determination results \"{0}\" is not a directory".format(input_calls_path)

    subdirs = os.listdir(input_calls_path)
    for subdir in subdirs:
        if subdir.find(io_consts.sample_folder_prefix) >= 0:
            sample_ploidy_results_dir = os.path.join(input_calls_path, subdir)
            sample_name = io_commons.get_sample_name_from_txt_file(sample_ploidy_results_dir)
            sample_read_depth_metadata = get_sample_read_depth_metadata(sample_ploidy_results_dir)
            sample_ploidy_metadata = get_sample_ploidy_metadata(sample_ploidy_results_dir)
            assert (sample_read_depth_metadata.sample_name == sample_name and
                    sample_ploidy_metadata.sample_name == sample_name), \
                "Inconsistency detected in the ploidy determination results in {0}: sample name in the .txt " \
                "file does not match with sample name in the posterior headers".format(sample_ploidy_results_dir)
            sample_metadata_collection.add_sample_read_depth_metadata(sample_read_depth_metadata)
            sample_metadata_collection.add_sample_ploidy_metadata(sample_ploidy_metadata)
bsd-3-clause
zrhans/pythonanywhere
.virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tests/test_contour.py
6
8038
from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.externals import six import datetime import numpy as np from matplotlib import mlab from matplotlib.testing.decorators import cleanup, image_comparison from matplotlib import pyplot as plt from nose.tools import assert_equal, assert_raises import warnings import re @cleanup def test_contour_shape_1d_valid(): x = np.arange(10) y = np.arange(9) z = np.random.random((9, 10)) fig = plt.figure() ax = fig.add_subplot(111) ax.contour(x, y, z) @cleanup def test_contour_shape_2d_valid(): x = np.arange(10) y = np.arange(9) xg, yg = np.meshgrid(x, y) z = np.random.random((9, 10)) fig = plt.figure() ax = fig.add_subplot(111) ax.contour(xg, yg, z) @cleanup def test_contour_shape_mismatch_1(): x = np.arange(9) y = np.arange(9) z = np.random.random((9, 10)) fig = plt.figure() ax = fig.add_subplot(111) try: ax.contour(x, y, z) except TypeError as exc: assert exc.args[0] == 'Length of x must be number of columns in z.' @cleanup def test_contour_shape_mismatch_2(): x = np.arange(10) y = np.arange(10) z = np.random.random((9, 10)) fig = plt.figure() ax = fig.add_subplot(111) try: ax.contour(x, y, z) except TypeError as exc: assert exc.args[0] == 'Length of y must be number of rows in z.' @cleanup def test_contour_shape_mismatch_3(): x = np.arange(10) y = np.arange(10) xg, yg = np.meshgrid(x, y) z = np.random.random((9, 10)) fig = plt.figure() ax = fig.add_subplot(111) try: ax.contour(xg, y, z) except TypeError as exc: assert exc.args[0] == 'Number of dimensions of x and y should match.' try: ax.contour(x, yg, z) except TypeError as exc: assert exc.args[0] == 'Number of dimensions of x and y should match.' @cleanup def test_contour_shape_mismatch_4(): g = np.random.random((9, 10)) b = np.random.random((9, 9)) z = np.random.random((9, 10)) fig = plt.figure() ax = fig.add_subplot(111) try: ax.contour(b, g, z) except TypeError as exc: print(exc.args[0]) assert re.match( r'Shape of x does not match that of z: ' + r'found \(9L?, 9L?\) instead of \(9L?, 10L?\)\.', exc.args[0]) is not None try: ax.contour(g, b, z) except TypeError as exc: assert re.match( r'Shape of y does not match that of z: ' + r'found \(9L?, 9L?\) instead of \(9L?, 10L?\)\.', exc.args[0]) is not None @cleanup def test_contour_shape_invalid_1(): x = np.random.random((3, 3, 3)) y = np.random.random((3, 3, 3)) z = np.random.random((9, 10)) fig = plt.figure() ax = fig.add_subplot(111) try: ax.contour(x, y, z) except TypeError as exc: assert exc.args[0] == 'Inputs x and y must be 1D or 2D.' @cleanup def test_contour_shape_invalid_2(): x = np.random.random((3, 3, 3)) y = np.random.random((3, 3, 3)) z = np.random.random((3, 3, 3)) fig = plt.figure() ax = fig.add_subplot(111) try: ax.contour(x, y, z) except TypeError as exc: assert exc.args[0] == 'Input z must be a 2D array.' 
@image_comparison(baseline_images=['contour_manual_labels']) def test_contour_manual_labels(): x, y = np.meshgrid(np.arange(0, 10), np.arange(0, 10)) z = np.max(np.dstack([abs(x), abs(y)]), 2) plt.figure(figsize=(6, 2)) cs = plt.contour(x, y, z) pts = np.array([(1.5, 3.0), (1.5, 4.4), (1.5, 6.0)]) plt.clabel(cs, manual=pts) @image_comparison(baseline_images=['contour_labels_size_color'], extensions=['png'], remove_text=True) def test_contour_manual_labels(): x, y = np.meshgrid(np.arange(0, 10), np.arange(0, 10)) z = np.max(np.dstack([abs(x), abs(y)]), 2) plt.figure(figsize=(6, 2)) cs = plt.contour(x, y, z) pts = np.array([(1.5, 3.0), (1.5, 4.4), (1.5, 6.0)]) plt.clabel(cs, manual=pts, fontsize='small', colors=('r', 'g')) @image_comparison(baseline_images=['contour_manual_colors_and_levels'], extensions=['png'], remove_text=True) def test_given_colors_levels_and_extends(): _, axes = plt.subplots(2, 4) data = np.arange(12).reshape(3, 4) colors = ['red', 'yellow', 'pink', 'blue', 'black'] levels = [2, 4, 8, 10] for i, ax in enumerate(axes.flatten()): plt.sca(ax) filled = i % 2 == 0. extend = ['neither', 'min', 'max', 'both'][i // 2] if filled: last_color = -1 if extend in ['min', 'max'] else None plt.contourf(data, colors=colors[:last_color], levels=levels, extend=extend) else: last_level = -1 if extend == 'both' else None plt.contour(data, colors=colors, levels=levels[:last_level], extend=extend) plt.colorbar() @image_comparison(baseline_images=['contour_datetime_axis'], extensions=['png'], remove_text=False) def test_contour_datetime_axis(): fig = plt.figure() fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15) base = datetime.datetime(2013, 1, 1) x = np.array([base + datetime.timedelta(days=d) for d in range(20)]) y = np.arange(20) z1, z2 = np.meshgrid(np.arange(20), np.arange(20)) z = z1 * z2 plt.subplot(221) plt.contour(x, y, z) plt.subplot(222) plt.contourf(x, y, z) x = np.repeat(x[np.newaxis], 20, axis=0) y = np.repeat(y[:, np.newaxis], 20, axis=1) plt.subplot(223) plt.contour(x, y, z) plt.subplot(224) plt.contourf(x, y, z) for ax in fig.get_axes(): for label in ax.get_xticklabels(): label.set_ha('right') label.set_rotation(30) @image_comparison(baseline_images=['contour_test_label_transforms'], extensions=['png'], remove_text=True) def test_labels(): # Adapted from pylab_examples example code: contour_demo.py # see issues #2475, #2843, and #2818 for explanation delta = 0.025 x = np.arange(-3.0, 3.0, delta) y = np.arange(-2.0, 2.0, delta) X, Y = np.meshgrid(x, y) Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0) Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1) # difference of Gaussians Z = 10.0 * (Z2 - Z1) fig, ax = plt.subplots(1, 1) CS = ax.contour(X, Y, Z) disp_units = [(216, 177), (359, 290), (521, 406)] data_units = [(-2, .5), (0, -1.5), (2.8, 1)] CS.clabel() for x, y in data_units: CS.add_label_near(x, y, inline=True, transform=None) for x, y in disp_units: CS.add_label_near(x, y, inline=True, transform=False) @image_comparison(baseline_images=['contour_corner_mask_False', 'contour_corner_mask_True'], extensions=['png'], remove_text=True) def test_corner_mask(): n = 60 mask_level = 0.95 noise_amp = 1.0 np.random.seed([1]) x, y = np.meshgrid(np.linspace(0, 2.0, n), np.linspace(0, 2.0, n)) z = np.cos(7*x)*np.sin(8*y) + noise_amp*np.random.rand(n, n) mask = np.where(np.random.rand(n, n) >= mask_level, True, False) z = np.ma.array(z, mask=mask) for corner_mask in [False, True]: fig = plt.figure() plt.contourf(z, corner_mask=corner_mask) @cleanup def 
test_contourf_decreasing_levels(): # github issue 5477. z = [[0.1, 0.3], [0.5, 0.7]] plt.figure() assert_raises(ValueError, plt.contourf, z, [1.0, 0.0]) # Legacy contouring algorithm gives a warning rather than raising an error, # plus a DeprecationWarning. with warnings.catch_warnings(record=True) as w: plt.contourf(z, [1.0, 0.0], corner_mask='legacy') assert_equal(len(w), 2) if __name__ == '__main__': import nose nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
apache-2.0
manns/pyspread
pyspread/share/templates/matplotlib/chart_bar_1_3.py
1
1883
fig = Figure()
ax = fig.add_axes([.2,.05, .7, .7])

category_names = ['Strongly disagree', 'Disagree',
                  'Neither agree nor disagree', 'Agree', 'Strongly agree']
results = {
    'Question 1': [10, 15, 17, 32, 26],
    'Question 2': [26, 22, 29, 10, 13],
    'Question 3': [35, 37, 7, 2, 19],
    'Question 4': [32, 11, 9, 15, 33],
    'Question 5': [21, 29, 5, 5, 40],
    'Question 6': [8, 19, 5, 30, 38]
}

def survey(fig, ax, results, category_names):
    """
    Parameters
    ----------
    results : dict
        A mapping from question labels to a list of answers per category.
        It is assumed all lists contain the same number of entries and that
        it matches the length of *category_names*.
    category_names : list of str
        The category labels.
    """
    from matplotlib import cm

    labels = list(results.keys())
    data = numpy.array(list(results.values()))
    data_cum = data.cumsum(axis=1)
    category_colors = cm.RdYlGn(numpy.linspace(0.15, 0.85, data.shape[1]))

    ax.invert_yaxis()
    ax.xaxis.set_visible(False)
    ax.set_xlim(0, numpy.sum(data, axis=1).max())

    for i, (colname, color) in enumerate(zip(category_names,
                                             category_colors)):
        widths = data[:, i]
        starts = data_cum[:, i] - widths
        ax.barh(labels, widths, left=starts, height=0.5,
                label=colname, color=color)
        xcenters = starts + widths / 2

        r, g, b, _ = color
        text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
        for y, (x, c) in enumerate(zip(xcenters, widths)):
            ax.text(x, y, str(int(c)), ha='center', va='center',
                    color=text_color)
    ax.legend(ncol=2, bbox_to_anchor=(0, 1),
              loc='lower left', fontsize='small')
    ax.set_title("Normalized stacked bar chart", pad=60)

    return fig

survey(fig, ax, results, category_names)
gpl-3.0
seaotterman/tensorflow
tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py
62
5053
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl

# pylint: disable=g-import-not-at-top
try:
  import pandas as pd
  HAS_PANDAS = True
except ImportError:
  HAS_PANDAS = False


def get_rows(array, row_indices):
  rows = [array[i] for i in row_indices]
  return np.vstack(rows)


class FeedingQueueRunnerTestCase(test.TestCase):
  """Tests for `FeedingQueueRunner`."""

  def testArrayFeeding(self):
    with ops.Graph().as_default():
      array = np.arange(32).reshape([16, 2])
      q = ff.enqueue_data(array, capacity=100)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          indices = [
              j % array.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_dq = get_rows(array, indices)
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(indices, dq[0])
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testArrayFeedingMultiThread(self):
    with ops.Graph().as_default():
      array = np.arange(256).reshape([128, 2])
      q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_dq = get_rows(array, indices)
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeeding(self):
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(32)
      array2 = np.arange(32, 64)
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
      q = ff.enqueue_data(df, capacity=100)
      batch_size = 5
      dq_op = q.dequeue_many(5)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          indices = [
              j % array1.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_df_indices = df.index[indices]
          expected_rows = df.iloc[indices]
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(expected_df_indices, dq[0])
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeedingMultiThread(self):
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(128, 256)
      array2 = 2 * array1
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
      q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
      batch_size = 5
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_rows = df.iloc[indices]
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)


if __name__ == "__main__":
  test.main()
apache-2.0
pratapvardhan/pandas
pandas/tests/io/parser/parse_dates.py
3
27633
# -*- coding: utf-8 -*- """ Tests date parsing functionality for all of the parsers defined in parsers.py """ from distutils.version import LooseVersion from datetime import datetime, date import pytest import numpy as np from pandas._libs.tslibs import parsing from pandas._libs.tslib import Timestamp import pandas as pd import pandas.io.parsers as parsers import pandas.core.tools.datetimes as tools import pandas.util.testing as tm import pandas.io.date_converters as conv from pandas import DataFrame, Series, Index, DatetimeIndex, MultiIndex from pandas import compat from pandas.compat import parse_date, StringIO, lrange from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.indexes.datetimes import date_range class ParseDatesTests(object): def test_separator_date_conflict(self): # Regression test for gh-4678: make sure thousands separator and # date parsing do not conflict. data = '06-02-2013;13:00;1-000.215' expected = DataFrame( [[datetime(2013, 6, 2, 13, 0, 0), 1000.215]], columns=['Date', 2] ) df = self.read_csv(StringIO(data), sep=';', thousands='-', parse_dates={'Date': [0, 1]}, header=None) tm.assert_frame_equal(df, expected) def test_multiple_date_col(self): # Can use multiple date parsers data = """\ KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 """ def func(*date_cols): res = parsing.try_parse_dates(parsers._concat_date_cols(date_cols)) return res df = self.read_csv(StringIO(data), header=None, date_parser=func, prefix='X', parse_dates={'nominal': [1, 2], 'actual': [1, 3]}) assert 'nominal' in df assert 'actual' in df assert 'X1' not in df assert 'X2' not in df assert 'X3' not in df d = datetime(1999, 1, 27, 19, 0) assert df.loc[0, 'nominal'] == d df = self.read_csv(StringIO(data), header=None, date_parser=func, parse_dates={'nominal': [1, 2], 'actual': [1, 3]}, keep_date_col=True) assert 'nominal' in df assert 'actual' in df assert 1 in df assert 2 in df assert 3 in df data = """\ KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 """ df = self.read_csv(StringIO(data), header=None, prefix='X', parse_dates=[[1, 2], [1, 3]]) assert 'X1_X2' in df assert 'X1_X3' in df assert 'X1' not in df assert 'X2' not in df assert 'X3' not in df d = datetime(1999, 1, 27, 19, 0) assert df.loc[0, 'X1_X2'] == d df = self.read_csv(StringIO(data), header=None, parse_dates=[[1, 2], [1, 3]], keep_date_col=True) assert '1_2' in df assert '1_3' in df assert 1 in df assert 2 in df assert 3 in df data = '''\ KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 KORD,19990127 21:00:00, 21:18:00, -0.9900, 
2.0100, 3.6000, 0.0000, 270.0000 KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 ''' df = self.read_csv(StringIO(data), sep=',', header=None, parse_dates=[1], index_col=1) d = datetime(1999, 1, 27, 19, 0) assert df.index[0] == d def test_multiple_date_cols_int_cast(self): data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n" "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n" "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n" "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n" "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n" "KORD,19990127, 23:00:00, 22:56:00, -0.5900") date_spec = {'nominal': [1, 2], 'actual': [1, 3]} import pandas.io.date_converters as conv # it works! df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec, date_parser=conv.parse_date_time) assert 'nominal' in df def test_multiple_date_col_timestamp_parse(self): data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25 05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25""" result = self.read_csv(StringIO(data), sep=',', header=None, parse_dates=[[0, 1]], date_parser=Timestamp) ex_val = Timestamp('05/31/2012 15:30:00.029') assert result['0_1'][0] == ex_val def test_multiple_date_cols_with_header(self): data = """\ ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]}) assert not isinstance(df.nominal[0], compat.string_types) ts_data = """\ ID,date,nominalTime,actualTime,A,B,C,D,E KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 """ def test_multiple_date_col_name_collision(self): with pytest.raises(ValueError): self.read_csv(StringIO(self.ts_data), parse_dates={'ID': [1, 2]}) data = """\ date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa with pytest.raises(ValueError): self.read_csv(StringIO(data), parse_dates=[[1, 2]]) def test_date_parser_int_bug(self): # See gh-3071 log_file = StringIO( 'posix_timestamp,elapsed,sys,user,queries,query_time,rows,' 'accountid,userid,contactid,level,silo,method\n' '1343103150,0.062353,0,4,6,0.01690,3,' '12345,1,-1,3,invoice_InvoiceResource,search\n' ) def f(posix_string): return datetime.utcfromtimestamp(int(posix_string)) # it works! 
self.read_csv(log_file, index_col=0, parse_dates=[0], date_parser=f) def test_nat_parse(self): # See gh-3062 df = DataFrame(dict({ 'A': np.asarray(lrange(10), dtype='float64'), 'B': pd.Timestamp('20010101')})) df.iloc[3:6, :] = np.nan with tm.ensure_clean('__nat_parse_.csv') as path: df.to_csv(path) result = self.read_csv(path, index_col=0, parse_dates=['B']) tm.assert_frame_equal(result, df) expected = Series(dict(A='float64', B='datetime64[ns]')) tm.assert_series_equal(expected, result.dtypes) # test with NaT for the nan_rep # we don't have a method to specify the Datetime na_rep # (it defaults to '') df.to_csv(path) result = self.read_csv(path, index_col=0, parse_dates=['B']) tm.assert_frame_equal(result, df) def test_csv_custom_parser(self): data = """A,B,C 20090101,a,1,2 20090102,b,3,4 20090103,c,4,5 """ f = lambda x: datetime.strptime(x, '%Y%m%d') df = self.read_csv(StringIO(data), date_parser=f) expected = self.read_csv(StringIO(data), parse_dates=True) tm.assert_frame_equal(df, expected) def test_parse_dates_implicit_first_col(self): data = """A,B,C 20090101,a,1,2 20090102,b,3,4 20090103,c,4,5 """ df = self.read_csv(StringIO(data), parse_dates=True) expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True) assert isinstance( df.index[0], (datetime, np.datetime64, Timestamp)) tm.assert_frame_equal(df, expected) def test_parse_dates_string(self): data = """date,A,B,C 20090101,a,1,2 20090102,b,3,4 20090103,c,4,5 """ rs = self.read_csv( StringIO(data), index_col='date', parse_dates=['date']) idx = date_range('1/1/2009', periods=3) idx.name = 'date' xp = DataFrame({'A': ['a', 'b', 'c'], 'B': [1, 3, 4], 'C': [2, 4, 5]}, idx) tm.assert_frame_equal(rs, xp) def test_yy_format_with_yearfirst(self): data = """date,time,B,C 090131,0010,1,2 090228,1020,3,4 090331,0830,5,6 """ # See gh-217 import dateutil if LooseVersion(dateutil.__version__) >= LooseVersion('2.5.0'): pytest.skip("testing yearfirst=True not-support" "on datetutil < 2.5.0 this works but" "is wrong") rs = self.read_csv(StringIO(data), index_col=0, parse_dates=[['date', 'time']]) idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0), datetime(2009, 2, 28, 10, 20, 0), datetime(2009, 3, 31, 8, 30, 0)], dtype=object, name='date_time') xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx) tm.assert_frame_equal(rs, xp) rs = self.read_csv(StringIO(data), index_col=0, parse_dates=[[0, 1]]) idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0), datetime(2009, 2, 28, 10, 20, 0), datetime(2009, 3, 31, 8, 30, 0)], dtype=object, name='date_time') xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx) tm.assert_frame_equal(rs, xp) def test_parse_dates_column_list(self): data = 'a,b,c\n01/01/2010,1,15/02/2010' expected = DataFrame({'a': [datetime(2010, 1, 1)], 'b': [1], 'c': [datetime(2010, 2, 15)]}) expected = expected.set_index(['a', 'b']) df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=[0, 2], dayfirst=True) tm.assert_frame_equal(df, expected) df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=['a', 'c'], dayfirst=True) tm.assert_frame_equal(df, expected) def test_multi_index_parse_dates(self): data = """index1,index2,A,B,C 20090101,one,a,1,2 20090101,two,b,3,4 20090101,three,c,4,5 20090102,one,a,1,2 20090102,two,b,3,4 20090102,three,c,4,5 20090103,one,a,1,2 20090103,two,b,3,4 20090103,three,c,4,5 """ df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True) assert isinstance(df.index.levels[0][0], (datetime, np.datetime64, Timestamp)) # specify columns out of order! 
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True) assert isinstance(df2.index.levels[1][0], (datetime, np.datetime64, Timestamp)) def test_parse_dates_custom_euroformat(self): text = """foo,bar,baz 31/01/2010,1,2 01/02/2010,1,NA 02/02/2010,1,2 """ parser = lambda d: parse_date(d, dayfirst=True) df = self.read_csv(StringIO(text), names=['time', 'Q', 'NTU'], header=0, index_col=0, parse_dates=True, date_parser=parser, na_values=['NA']) exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1), datetime(2010, 2, 2)], name='time') expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]}, index=exp_index, columns=['Q', 'NTU']) tm.assert_frame_equal(df, expected) parser = lambda d: parse_date(d, day_first=True) pytest.raises(TypeError, self.read_csv, StringIO(text), skiprows=[0], names=['time', 'Q', 'NTU'], index_col=0, parse_dates=True, date_parser=parser, na_values=['NA']) def test_parse_tz_aware(self): # See gh-1693 import pytz data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5") # it works result = self.read_csv(data, index_col=0, parse_dates=True) stamp = result.index[0] assert stamp.minute == 39 try: assert result.index.tz is pytz.utc except AssertionError: # hello Yaroslav arr = result.index.to_pydatetime() result = tools.to_datetime(arr, utc=True)[0] assert stamp.minute == result.minute assert stamp.hour == result.hour assert stamp.day == result.day def test_multiple_date_cols_index(self): data = """ ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 """ xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]}) df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]}, index_col='nominal') tm.assert_frame_equal(xp.set_index('nominal'), df) df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]}, index_col=0) tm.assert_frame_equal(df2, df) df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0) tm.assert_frame_equal(df3, df, check_names=False) def test_multiple_date_cols_chunked(self): df = self.read_csv(StringIO(self.ts_data), parse_dates={ 'nominal': [1, 2]}, index_col='nominal') reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal': [1, 2]}, index_col='nominal', chunksize=2) chunks = list(reader) assert 'nominalTime' not in df tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) def test_multiple_date_col_named_components(self): xp = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal': [1, 2]}, index_col='nominal') colspec = {'nominal': ['date', 'nominalTime']} df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec, index_col='nominal') tm.assert_frame_equal(df, xp) def test_multiple_date_col_multiple_index(self): df = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal': [1, 2]}, index_col=['nominal', 'ID']) xp = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal': [1, 2]}) tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df) def test_read_with_parse_dates_scalar_non_bool(self): # See gh-5636 errmsg = ("Only 
booleans, lists, and " "dictionaries are accepted " "for the 'parse_dates' parameter") data = """A,B,C 1,2,2003-11-1""" tm.assert_raises_regex(TypeError, errmsg, self.read_csv, StringIO(data), parse_dates="C") tm.assert_raises_regex(TypeError, errmsg, self.read_csv, StringIO(data), parse_dates="C", index_col="C") def test_read_with_parse_dates_invalid_type(self): errmsg = ("Only booleans, lists, and " "dictionaries are accepted " "for the 'parse_dates' parameter") data = """A,B,C 1,2,2003-11-1""" tm.assert_raises_regex(TypeError, errmsg, self.read_csv, StringIO(data), parse_dates=(1,)) tm.assert_raises_regex(TypeError, errmsg, self.read_csv, StringIO(data), parse_dates=np.array([4, 5])) tm.assert_raises_regex(TypeError, errmsg, self.read_csv, StringIO(data), parse_dates=set([1, 3, 3])) def test_parse_dates_empty_string(self): # see gh-2263 data = "Date, test\n2012-01-01, 1\n,2" result = self.read_csv(StringIO(data), parse_dates=["Date"], na_filter=False) assert result['Date'].isna()[1] def test_parse_dates_noconvert_thousands(self): # see gh-14066 data = 'a\n04.15.2016' expected = DataFrame([datetime(2016, 4, 15)], columns=['a']) result = self.read_csv(StringIO(data), parse_dates=['a'], thousands='.') tm.assert_frame_equal(result, expected) exp_index = DatetimeIndex(['2016-04-15'], name='a') expected = DataFrame(index=exp_index) result = self.read_csv(StringIO(data), index_col=0, parse_dates=True, thousands='.') tm.assert_frame_equal(result, expected) data = 'a,b\n04.15.2016,09.16.2013' expected = DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]], columns=['a', 'b']) result = self.read_csv(StringIO(data), parse_dates=['a', 'b'], thousands='.') tm.assert_frame_equal(result, expected) expected = DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]], columns=['a', 'b']) expected = expected.set_index(['a', 'b']) result = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True, thousands='.') tm.assert_frame_equal(result, expected) def test_parse_date_time_multi_level_column_name(self): data = """\ D,T,A,B date, time,a,b 2001-01-05, 09:00:00, 0.0, 10. 2001-01-06, 00:00:00, 1.0, 11. """ datecols = {'date_time': [0, 1]} result = self.read_csv(StringIO(data), sep=',', header=[0, 1], parse_dates=datecols, date_parser=conv.parse_date_time) expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.], [datetime(2001, 1, 6, 0, 0, 0), 1., 11.]] expected = DataFrame(expected_data, columns=['date_time', ('A', 'a'), ('B', 'b')]) tm.assert_frame_equal(result, expected) def test_parse_date_time(self): dates = np.array(['2007/1/3', '2008/2/4'], dtype=object) times = np.array(['05:07:09', '06:08:00'], dtype=object) expected = np.array([datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)]) result = conv.parse_date_time(dates, times) assert (result == expected).all() data = """\ date, time, a, b 2001-01-05, 10:00:00, 0.0, 10. 2001-01-05, 00:00:00, 1., 11. 
""" datecols = {'date_time': [0, 1]} df = self.read_csv(StringIO(data), sep=',', header=0, parse_dates=datecols, date_parser=conv.parse_date_time) assert 'date_time' in df assert df.date_time.loc[0] == datetime(2001, 1, 5, 10, 0, 0) data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n" "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n" "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n" "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n" "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n" "KORD,19990127, 23:00:00, 22:56:00, -0.5900") date_spec = {'nominal': [1, 2], 'actual': [1, 3]} df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec, date_parser=conv.parse_date_time) def test_parse_date_fields(self): years = np.array([2007, 2008]) months = np.array([1, 2]) days = np.array([3, 4]) result = conv.parse_date_fields(years, months, days) expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)]) assert (result == expected).all() data = ("year, month, day, a\n 2001 , 01 , 10 , 10.\n" "2001 , 02 , 1 , 11.") datecols = {'ymd': [0, 1, 2]} df = self.read_csv(StringIO(data), sep=',', header=0, parse_dates=datecols, date_parser=conv.parse_date_fields) assert 'ymd' in df assert df.ymd.loc[0] == datetime(2001, 1, 10) def test_datetime_six_col(self): years = np.array([2007, 2008]) months = np.array([1, 2]) days = np.array([3, 4]) hours = np.array([5, 6]) minutes = np.array([7, 8]) seconds = np.array([9, 0]) expected = np.array([datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)]) result = conv.parse_all_fields(years, months, days, hours, minutes, seconds) assert (result == expected).all() data = """\ year, month, day, hour, minute, second, a, b 2001, 01, 05, 10, 00, 0, 0.0, 10. 2001, 01, 5, 10, 0, 00, 1., 11. """ datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]} df = self.read_csv(StringIO(data), sep=',', header=0, parse_dates=datecols, date_parser=conv.parse_all_fields) assert 'ymdHMS' in df assert df.ymdHMS.loc[0] == datetime(2001, 1, 5, 10, 0, 0) def test_datetime_fractional_seconds(self): data = """\ year, month, day, hour, minute, second, a, b 2001, 01, 05, 10, 00, 0.123456, 0.0, 10. 2001, 01, 5, 10, 0, 0.500000, 1., 11. """ datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]} df = self.read_csv(StringIO(data), sep=',', header=0, parse_dates=datecols, date_parser=conv.parse_all_fields) assert 'ymdHMS' in df assert df.ymdHMS.loc[0] == datetime(2001, 1, 5, 10, 0, 0, microsecond=123456) assert df.ymdHMS.loc[1] == datetime(2001, 1, 5, 10, 0, 0, microsecond=500000) def test_generic(self): data = "year, month, day, a\n 2001, 01, 10, 10.\n 2001, 02, 1, 11." 
datecols = {'ym': [0, 1]} dateconverter = lambda y, m: date(year=int(y), month=int(m), day=1) df = self.read_csv(StringIO(data), sep=',', header=0, parse_dates=datecols, date_parser=dateconverter) assert 'ym' in df assert df.ym.loc[0] == date(2001, 1, 1) def test_dateparser_resolution_if_not_ns(self): # GH 10245 data = """\ date,time,prn,rxstatus 2013-11-03,19:00:00,126,00E80000 2013-11-03,19:00:00,23,00E80000 2013-11-03,19:00:00,13,00E80000 """ def date_parser(date, time): datetime = np_array_datetime64_compat( date + 'T' + time + 'Z', dtype='datetime64[s]') return datetime df = self.read_csv(StringIO(data), date_parser=date_parser, parse_dates={'datetime': ['date', 'time']}, index_col=['datetime', 'prn']) datetimes = np_array_datetime64_compat(['2013-11-03T19:00:00Z'] * 3, dtype='datetime64[s]') df_correct = DataFrame(data={'rxstatus': ['00E80000'] * 3}, index=MultiIndex.from_tuples( [(datetimes[0], 126), (datetimes[1], 23), (datetimes[2], 13)], names=['datetime', 'prn'])) tm.assert_frame_equal(df, df_correct) def test_parse_date_column_with_empty_string(self): # GH 6428 data = """case,opdate 7,10/18/2006 7,10/18/2008 621, """ result = self.read_csv(StringIO(data), parse_dates=['opdate']) expected_data = [[7, '10/18/2006'], [7, '10/18/2008'], [621, ' ']] expected = DataFrame(expected_data, columns=['case', 'opdate']) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("data,expected", [ ("a\n135217135789158401\n1352171357E+5", DataFrame({"a": [135217135789158401, 135217135700000]}, dtype="float64")), ("a\n99999999999\n123456789012345\n1234E+0", DataFrame({"a": [99999999999, 123456789012345, 1234]}, dtype="float64")) ]) @pytest.mark.parametrize("parse_dates", [True, False]) def test_parse_date_float(self, data, expected, parse_dates): # see gh-2697 # # Date parsing should fail, so we leave the data untouched # (i.e. float precision should remain unchanged). result = self.read_csv(StringIO(data), parse_dates=parse_dates) tm.assert_frame_equal(result, expected)
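# --- Editor's illustrative sketch (not part of the original test module). ---
# The behaviours exercised above reduce to read_csv's parse_dates options.
# The helper below is hypothetical; it combines two raw columns into a single
# parsed datetime column, which is what the {'new name': [col, col]} spec does.
def _example_combined_datetime_column():
    data = ("date,time,value\n"
            "19990127,19:00:00,0.81\n"
            "19990127,20:00:00,0.01\n")
    df = pd.read_csv(StringIO(data), parse_dates={'when': ['date', 'time']})
    # 'when' is datetime64[ns]; 'date' and 'time' are dropped unless
    # keep_date_col=True is also passed.
    return df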
bsd-3-clause
gdl-civestav-localization/cinvestav_location_fingerprinting
datasets/__init__.py
1
4814
import os import pandas as pd import numpy as np from geopy.distance import vincenty __author__ = 'Gibran Felix' def save_mac_list(log_folder='anyplace_labeled'): macs = set() for file_name in os.listdir(log_folder): with open(os.path.join(log_folder, file_name), 'rb') as f: # Read document f.readline() # First line is junk for line in f.readlines(): # Check if is label if line[0] != '#': # Get mac and rss values values = line.split(' ') mac = values[4] macs.add(mac) macs = np.array(list(macs), dtype=np.str) np.savetxt( fname='mac_filters/mac_filters', fmt='%s', X=macs, delimiter=',' ) def format_samples(log_folder='anyplace_labeled'): samples = [] all_rss = {} x = 0 y = 0 floor = 0 for file_name in os.listdir(log_folder): with open(os.path.join(log_folder, file_name), 'rb') as f: # Read document f.readline() # First line is junk for line in f.readlines(): # Check if is label if line[0] != '#': values = line.split(' ') # Get labels x = values[1] y = values[2] floor = values[6] # Get mac and rss values mac = values[4] rss = values[5] all_rss[mac] = rss else: # If is label add row samples.append({ 'x': x, 'y': y, 'z': floor, 'rss': all_rss }) all_rss = {} # Add the last row samples.append({ 'x': x, 'y': y, 'z': floor, 'rss': all_rss }) return samples def normalize_coordinates(dataframe, dataset_name, separator=','): x_min = 20.667327 # dataframe['result_x'].min() y_min = -103.464568 # dataframe['result_y'].min() def distance_x(x): p1 = (x_min, y_min) p2 = (x, y_min) return vincenty(p1, p2).meters def distance_y(y): p1 = (x_min, y_min) p2 = (x_min, y) return vincenty(p1, p2).meters # Calculate the distance in meters between x_min and x for all x dataframe['result_x'] = dataframe.apply( lambda x: distance_x( x['result_x'] ), axis=1 ) # Calculate the distance in meters between y_min and y for all y dataframe['result_y'] = dataframe.apply( lambda x: distance_y( x['result_y'] ), axis=1 ) # Normalize coordinates to zero mean values x_mean = dataframe['result_x'].mean() y_mean = dataframe['result_y'].mean() dataframe['result_x'] = (dataframe['result_x'] - x_mean) dataframe['result_y'] = (dataframe['result_y'] - y_mean) # Save dataframe dataset_name = os.path.join(os.path.dirname(__file__), 'dataset', dataset_name + '.csv') dataframe.to_csv(dataset_name, sep=separator, encoding='utf-8') def parse_anyplace_log_to_dataset( log_folder, dataset_name, separator=',', with_z_label=False ): # Format anyplace_labeled log files macs = np.loadtxt( fname='mac_filters/mac_filters', delimiter=',', dtype=str ) samples = format_samples(log_folder=log_folder) # Generate dataset dataset = [] for sample in samples: row = [] for mac in macs: if mac in sample['rss']: row.append(sample['rss'][mac]) else: row.append(0) # Add labels row.append(sample['x']) row.append(sample['y']) if with_z_label: row.append(sample['z']) # Add row to dataset dataset.append(row) # Generate columns name columns = [] for ap in range(0, len(macs)): columns.append("ap{}".format(ap)) columns.append("result_x") columns.append("result_y") if with_z_label: columns.append("result_z") if with_z_label: dataset_name += '_z' df = pd.DataFrame(data=dataset, columns=columns, dtype=np.float64) normalize_coordinates( dataframe=df, separator=',', dataset_name=dataset_name ) if __name__ == "__main__": # save_mac_list(log_folder='anyplace_unlabeled') parse_anyplace_log_to_dataset( log_folder='anyplace_labeled', dataset_name='cinvestav_labeled', separator=',', with_z_label=False )
gpl-3.0
CalebBell/thermo
tests/test_fitting.py
1
7452
# -*- coding: utf-8 -*- '''Chemical Engineering Design Library (ChEDL). Utilities for process modeling. Copyright (C) 2019, Caleb Bell <Caleb.Andrew.Bell@gmail.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' from fluids.numerics import assert_close, assert_close1d, linspace, horner import pytest from thermo import fitting from thermo.fitting import * import os import pandas as pd from math import log, exp from thermo.eos import PR from chemicals import SMK try: import ChebTools has_ChebTools = True except: has_ChebTools = False def test_poly_fit_statistics(): eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400., P=1E6) coeffs_linear_short = [4.237517714500429e-17, -1.6220845282077796e-13, 2.767061931081117e-10, -2.7334899251582114e-07, 0.00017109676992782628, -0.06958709998929116, 18.296622011252442, -3000.9526306002426, 279584.4945619958, -11321565.153797101] assert_close1d(poly_fit_statistics(eos.Psat, coeffs_linear_short, 350, 370, pts=20), ((5.275985987963585e-13, 2.3609241443957343e-13, 0.9999999999997593, 1.0000000000010527))) coeffs_log_wide = [-6.923458272467545e-34, 3.171388201508307e-30, -6.735597262604644e-27, 8.793711247972171e-24, -7.8876802752648e-21, 5.145653902039968e-18, -2.520640133404172e-15, 9.43624298163657e-13, -2.720724561428941e-10, 6.040147808787146e-08, -1.0238798842548737e-05, 0.0013010446170104578, -0.11983655414848156, 7.513096179431395, -255.4400839702631] vals = poly_fit_statistics(eos.Psat, coeffs_log_wide, 200, 400, pts=20, interpolation_property_inv=lambda x: exp(x)) assert_close1d(vals, (4.726386844046499e-11, 5.357413863778554e-11, 0.9999999998217595, 1.000000000048253), rtol=1e-5) coeffs_linear_short_under_P = [8.908707105348414e-19, -1.755094715936469e-15, 1.4598575771661687e-12, -6.524471347238254e-10, 1.6517953770496486e-07, -2.235668677342007e-05, 0.0013781747488685666] vals = poly_fit_statistics(lambda T, P: eos.to(T=T, P=P).V_l, coeffs_linear_short_under_P, 350, 370, pts=20,arg_func=lambda T: (eos.Psat(T)*1.1,)) assert_close1d(vals, (4.8763921123136214e-12, 3.1314502494252356e-12, 0.9999999999901142, 1.0000000000100442), rtol=1e-5) coeffs_linear_short_SMK_x_trans = [-1.5401734702160141e-07, -1.0183557435431418e-05, -0.00031608282829210556, -0.006130617432051274, -0.08348304178427365, -0.8506639735258102, -6.755135004792148, -43.02254132277009, -224.5971807491821, -977.5997742351115, -3593.3395292307905, -11237.541018269085, -29909.704792375283, -67117.34552780485, -124224.64886177448, -182577.01370485546, -200814.92033568348, -150176.54405472748, -56142.62419926465, 30626.11385017538] 
Tc = 750.0 vals = poly_fit_statistics(lambda T: SMK(T, Tc=Tc, omega=0.04), coeffs_linear_short_SMK_x_trans, 200, 748, pts=20, interpolation_x=lambda T: log(1. - T/Tc)) assert_close1d(vals, (1.4520389207346671e-09, 9.938397794626389e-10, 0.999999997336866, 1.0000000039180956), rtol=1e-5) @pytest.mark.skipif(not has_ChebTools, reason='Missing ChebTools') def test_fit_cheb_poly(): eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400., P=1E6) coeffs_linear_short = fit_cheb_poly(eos.Psat, 350, 370, 10) for T in linspace(350, 370, 30): assert_close(eos.Psat(T), horner(coeffs_linear_short, T), rtol=1e-9) # Test transformation of the output only coeffs_log_wide = fit_cheb_poly(eos.Psat, 200, 400, 15, interpolation_property=lambda x: log(x), interpolation_property_inv=lambda x: exp(x)) for T in linspace(200, 400, 30): assert_close(eos.Psat(T), exp(horner(coeffs_log_wide, T)), rtol=1e-9) # Test ability to have other arguments depend on it coeffs_linear_short_under_P = fit_cheb_poly(lambda T, P: eos.to(T=T, P=P).V_l, 350, 370, 7, arg_func=lambda T: (eos.Psat(T)*1.1,)) for T in linspace(350, 370, 30): P = eos.Psat(T)*1.1 assert_close(eos.to(T=T, P=P).V_l, horner(coeffs_linear_short_under_P, T), rtol=1e-9) # Test ability to have other arguments depend on it coeffs_log_short_above_P = fit_cheb_poly(lambda T, P: eos.to(T=T, P=P).V_g, 350, 370, 7, arg_func=lambda T: (eos.Psat(T)*.7,), interpolation_property=lambda x: log(x), interpolation_property_inv=lambda x: exp(x)) for T in linspace(350, 370, 30): P = eos.Psat(T)*0.7 assert_close(eos.to(T=T, P=P).V_g, exp(horner(coeffs_log_short_above_P, T)), rtol=1e-9) # test interpolation_x Tc = 750.0 coeffs_linear_short_SMK_x_trans = fit_cheb_poly(lambda T: SMK(T, Tc=Tc, omega=0.04), 200, 748, 20, interpolation_x=lambda T: log(1. - T/Tc), interpolation_x_inv=lambda x: -(exp(x)-1.0)*Tc) for T in linspace(200, 748, 30): x = log(1. - T/Tc) assert_close(SMK(T, Tc=Tc, omega=0.04), horner(coeffs_linear_short_SMK_x_trans, x), rtol=1e-7) # Case with one coefficient and no T bounds assert_close1d(fit_cheb_poly(func=lambda T: 102.5, low=298.15, high=298.15, n=1), [102.5]) def test_Twu91_check_params(): assert Twu91_check_params((0.694911381318495, 0.919907783415812, 1.70412689631515)) # Ian Bell, methanol assert not Twu91_check_params((0.81000842, 0.94790489, 1.49618907)) # Fit without constraints for methanol # CH4 # Twu91_check_params((0.1471, 0.9074, 1.8253)) # Should be consistent - probably a decimal problem assert not Twu91_check_params((0.0777, 0.9288, 3.0432)) # Should be inconsistent # N2 assert Twu91_check_params((0.1240, 0.8897, 2.0138))# consistent assert not Twu91_check_params((0.0760, 0.9144, 2.9857)) # inconsistent def test_Twu91_check_params_Bell(): folder = os.path.join(os.path.dirname(fitting.__file__), 'Phase Change') Bell_2018_data = pd.read_csv(os.path.join(folder, 'Bell 2018 je7b00967_si_001.tsv'), sep='\t', index_col=6) v = Bell_2018_data_values = Bell_2018_data.values for (c0, c1, c2) in zip(v[:, 2], v[:, 3], v[:, 4]): assert Twu91_check_params((c0, c1, c2))
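# --- Editor's illustrative sketch (not part of the original test file). ---
# Hypothetical helper showing how the consistency check exercised above can
# gate a fitted Twu (1991) alpha-function coefficient set before it is used.
def _example_screen_twu91(coeffs):
    """Return coeffs unchanged if consistent, else raise ValueError."""
    if not Twu91_check_params(coeffs):
        raise ValueError("Inconsistent Twu91 coefficients: %s" % (coeffs,))
    return coeffs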
mit
mkuai/underwater
src/core/examples/sample-rng-plot.py
188
1246
# -*- Mode:Python; -*- # /* # * This program is free software; you can redistribute it and/or modify # * it under the terms of the GNU General Public License version 2 as # * published by the Free Software Foundation # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. # * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # */ # Demonstrate use of ns-3 as a random number generator integrated with # plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial import numpy as np import matplotlib.pyplot as plt import ns.core # mu, var = 100, 225 rng = ns.core.NormalVariable(100.0, 225.0) x = [rng.GetValue() for t in range(10000)] # the histogram of the data n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75) plt.title('ns-3 histogram') plt.text(60, .025, r'$\mu=100,\ \sigma=15$') plt.axis([40, 160, 0, 0.03]) plt.grid(True) plt.show()
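# --- Editor's note (illustrative, not part of the original example). ---
# On matplotlib releases where the `normed` keyword has been removed, the
# equivalent histogram call would be:
#
#     n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)
#
# The distribution itself is unchanged: NormalVariable(100.0, 225.0) takes the
# variance as its second argument, i.e. sigma = 15 as annotated on the plot.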
gpl-2.0
wackymaster/QTClock
Libraries/matplotlib/backends/backend_qt4agg.py
8
2205
""" Render to qt from agg """ from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.externals import six import os # not used import sys import ctypes import warnings import matplotlib from matplotlib.figure import Figure from .backend_qt5agg import FigureCanvasQTAggBase as _FigureCanvasQTAggBase from .backend_agg import FigureCanvasAgg from .backend_qt4 import QtCore from .backend_qt4 import FigureManagerQT from .backend_qt4 import FigureCanvasQT from .backend_qt4 import NavigationToolbar2QT ##### not used from .backend_qt4 import show from .backend_qt4 import draw_if_interactive from .backend_qt4 import backend_version ###### DEBUG = False _decref = ctypes.pythonapi.Py_DecRef _decref.argtypes = [ctypes.py_object] _decref.restype = None def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ if DEBUG: print('backend_qt4agg.new_figure_manager') FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) return new_figure_manager_given_figure(num, thisFig) def new_figure_manager_given_figure(num, figure): """ Create a new figure manager instance for the given figure. """ canvas = FigureCanvasQTAgg(figure) return FigureManagerQT(canvas, num) class FigureCanvasQTAggBase(_FigureCanvasQTAggBase): def __init__(self, figure): self._agg_draw_pending = False class FigureCanvasQTAgg(FigureCanvasQTAggBase, FigureCanvasQT, FigureCanvasAgg): """ The canvas the figure renders into. Calls the draw and print fig methods, creates the renderers, etc... Public attribute figure - A Figure instance """ def __init__(self, figure): if DEBUG: print('FigureCanvasQtAgg: ', figure) FigureCanvasQT.__init__(self, figure) FigureCanvasQTAggBase.__init__(self, figure) FigureCanvasAgg.__init__(self, figure) self._drawRect = None self.blitbox = None self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent) FigureCanvas = FigureCanvasQTAgg FigureManager = FigureManagerQT
mit
hugobowne/scikit-learn
examples/neural_networks/plot_mlp_training_curves.py
56
3596
""" ======================================================== Compare Stochastic learning strategies for MLPClassifier ======================================================== This example visualizes some training loss curves for different stochastic learning strategies, including SGD and Adam. Because of time-constraints, we use several small datasets, for which L-BFGS might be more suitable. The general trend shown in these examples seems to carry over to larger datasets, however. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.neural_network import MLPClassifier from sklearn.preprocessing import MinMaxScaler from sklearn import datasets # different learning rate schedules and momentum parameters params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0, 'learning_rate_init': 0.2}, {'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9, 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, {'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9, 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, {'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, 'learning_rate_init': 0.2}, {'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, {'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, {'algorithm': 'adam'}] labels = ["constant learning-rate", "constant with momentum", "constant with Nesterov's momentum", "inv-scaling learning-rate", "inv-scaling with momentum", "inv-scaling with Nesterov's momentum", "adam"] plot_args = [{'c': 'red', 'linestyle': '-'}, {'c': 'green', 'linestyle': '-'}, {'c': 'blue', 'linestyle': '-'}, {'c': 'red', 'linestyle': '--'}, {'c': 'green', 'linestyle': '--'}, {'c': 'blue', 'linestyle': '--'}, {'c': 'black', 'linestyle': '-'}] def plot_on_dataset(X, y, ax, name): # for each dataset, plot learning for each learning strategy print("\nlearning on dataset %s" % name) ax.set_title(name) X = MinMaxScaler().fit_transform(X) mlps = [] if name == "digits": # digits is larger but converges fairly quickly max_iter = 15 else: max_iter = 400 for label, param in zip(labels, params): print("training: %s" % label) mlp = MLPClassifier(verbose=0, random_state=0, max_iter=max_iter, **param) mlp.fit(X, y) mlps.append(mlp) print("Training set score: %f" % mlp.score(X, y)) print("Training set loss: %f" % mlp.loss_) for mlp, label, args in zip(mlps, labels, plot_args): ax.plot(mlp.loss_curve_, label=label, **args) fig, axes = plt.subplots(2, 2, figsize=(15, 10)) # load / generate some toy datasets iris = datasets.load_iris() digits = datasets.load_digits() data_sets = [(iris.data, iris.target), (digits.data, digits.target), datasets.make_circles(noise=0.2, factor=0.5, random_state=1), datasets.make_moons(noise=0.3, random_state=0)] for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits', 'circles', 'moons']): plot_on_dataset(*data, ax=ax, name=name) fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center") plt.show()
bsd-3-clause
aestrivex/mne-python
examples/stats/plot_spatio_temporal_cluster_stats_sensor.py
6
5408
""" ===================================================== Spatiotemporal permutation F-test on full sensor data ===================================================== Tests for differential evoked responses in at least one condition using a permutation clustering test. The FieldTrip neighbor templates will be used to determine the adjacency between sensors. This serves as a spatial prior to the clustering. Significant spatiotemporal clusters will then be visualized using custom matplotlib code. """ # Authors: Denis Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from mne.viz import plot_topomap import mne from mne.stats import spatio_temporal_cluster_test from mne.datasets import sample from mne.channels import read_ch_connectivity print(__doc__) ############################################################################### # Set parameters data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' event_id = {'Aud_L': 1, 'Aud_R': 2, 'Vis_L': 3, 'Vis_R': 4} tmin = -0.2 tmax = 0.5 # Setup for reading the raw data raw = mne.io.Raw(raw_fname, preload=True) raw.filter(1, 30) events = mne.read_events(event_fname) ############################################################################### # Read epochs for the channel of interest picks = mne.pick_types(raw.info, meg='mag', eog=True) reject = dict(mag=4e-12, eog=150e-6) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=None, reject=reject, preload=True) epochs.drop_channels(['EOG 061']) epochs.equalize_event_counts(event_id, copy=False) condition_names = 'Aud_L', 'Aud_R', 'Vis_L', 'Vis_R' X = [epochs[k].get_data() for k in condition_names] # as 3D matrix X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering # load FieldTrip neighbor definition to setup sensor connectivity connectivity, ch_names = read_ch_connectivity('neuromag306mag') ############################################################################### # Compute statistic # set cluster threshold threshold = 50.0 # very high, but the test is quite sensitive on this data # set family-wise p-value p_accept = 0.001 cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000, threshold=threshold, tail=1, n_jobs=2, connectivity=connectivity) T_obs, clusters, p_values, _ = cluster_stats good_cluster_inds = np.where(p_values < p_accept)[0] ############################################################################### # Visualize clusters # configure variables for visualization times = epochs.times * 1e3 colors = 'r', 'r', 'steelblue', 'steelblue' linestyles = '-', '--', '-', '--' # grand average as numpy arrray grand_ave = np.array(X).mean(axis=1) # get sensor positions via layout pos = mne.find_layout(epochs.info).pos # loop over significant clusters for i_clu, clu_idx in enumerate(good_cluster_inds): # unpack cluster infomation, get unique indices time_inds, space_inds = np.squeeze(clusters[clu_idx]) ch_inds = np.unique(space_inds) time_inds = np.unique(time_inds) # get topography for F stat f_map = T_obs[time_inds, ...].mean(axis=0) # get signals at significant sensors signals = grand_ave[..., ch_inds].mean(axis=-1) sig_times = times[time_inds] # create spatial mask mask = np.zeros((f_map.shape[0], 1), dtype=bool) mask[ch_inds, :] = True # initialize figure fig, ax_topo = plt.subplots(1, 1, 
figsize=(10, 3)) title = 'Cluster #{0}'.format(i_clu + 1) fig.suptitle(title, fontsize=14) # plot average test statistic and mark significant sensors image, _ = plot_topomap(f_map, pos, mask=mask, axis=ax_topo, cmap='Reds', vmin=np.min, vmax=np.max) # advanced matplotlib for showing image with figure and colorbar # in one plot divider = make_axes_locatable(ax_topo) # add axes for colorbar ax_colorbar = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(image, cax=ax_colorbar) ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format( *sig_times[[0, -1]] )) # add new axis for time courses and plot time courses ax_signals = divider.append_axes('right', size='300%', pad=1.2) for signal, name, col, ls in zip(signals, condition_names, colors, linestyles): ax_signals.plot(times, signal, color=col, linestyle=ls, label=name) # add information ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset') ax_signals.set_xlim([times[0], times[-1]]) ax_signals.set_xlabel('time [ms]') ax_signals.set_ylabel('evoked magnetic fields [fT]') # plot significant time range ymin, ymax = ax_signals.get_ylim() ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1], color='orange', alpha=0.3) ax_signals.legend(loc='lower right') ax_signals.set_ylim(ymin, ymax) # clean up viz mne.viz.tight_layout(fig=fig) fig.subplots_adjust(bottom=.05) plt.show()
bsd-3-clause
cbertinato/pandas
pandas/tests/indexes/multi/test_constructor.py
1
22051
from collections import OrderedDict import numpy as np import pytest from pandas._libs.tslib import Timestamp from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike import pandas as pd from pandas import Index, MultiIndex, date_range import pandas.util.testing as tm def test_constructor_single_level(): result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], codes=[[0, 1, 2, 3]], names=['first']) assert isinstance(result, MultiIndex) expected = Index(['foo', 'bar', 'baz', 'qux'], name='first') tm.assert_index_equal(result.levels[0], expected) assert result.names == ['first'] def test_constructor_no_levels(): msg = "non-zero number of levels/codes" with pytest.raises(ValueError, match=msg): MultiIndex(levels=[], codes=[]) msg = "Must pass both levels and codes" with pytest.raises(TypeError, match=msg): MultiIndex(levels=[]) with pytest.raises(TypeError, match=msg): MultiIndex(codes=[]) def test_constructor_nonhashable_names(): # GH 20527 levels = [[1, 2], ['one', 'two']] codes = [[0, 0, 1, 1], [0, 1, 0, 1]] names = (['foo'], ['bar']) msg = r"MultiIndex\.name must be a hashable type" with pytest.raises(TypeError, match=msg): MultiIndex(levels=levels, codes=codes, names=names) # With .rename() mi = MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=('foo', 'bar')) renamed = [['foor'], ['barr']] with pytest.raises(TypeError, match=msg): mi.rename(names=renamed) # With .set_names() with pytest.raises(TypeError, match=msg): mi.set_names(names=renamed) def test_constructor_mismatched_codes_levels(idx): codes = [np.array([1]), np.array([2]), np.array([3])] levels = ["a"] msg = "Length of levels and codes must be the same" with pytest.raises(ValueError, match=msg): MultiIndex(levels=levels, codes=codes) length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\." " NOTE: this index is in an inconsistent state") label_error = r"Unequal code lengths: \[4, 2\]" code_value_error = r"On level 0, code value \(-2\) < -1" # important to check that it's looking at the right thing. 
with pytest.raises(ValueError, match=length_error): MultiIndex(levels=[['a'], ['b']], codes=[[0, 1, 2, 3], [0, 3, 4, 1]]) with pytest.raises(ValueError, match=label_error): MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]]) # external API with pytest.raises(ValueError, match=length_error): idx.copy().set_levels([['a'], ['b']]) with pytest.raises(ValueError, match=label_error): idx.copy().set_codes([[0, 0, 0, 0], [0, 0]]) # test set_codes with verify_integrity=False # the setting should not raise any value error idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]], verify_integrity=False) # code value smaller than -1 with pytest.raises(ValueError, match=code_value_error): MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]]) def test_na_levels(): # GH26408 # test if codes are re-assigned value -1 for levels # with mising values (NaN, NaT, None) result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[0, -1, 1, 2, 3, 4]]) expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[-1, -1, -1, -1, 3, 4]]) tm.assert_index_equal(result, expected) result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]], codes=[[0, -1, 1, 2, 3, 4]]) expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]], codes=[[-1, -1, 1, -1, 3, -1]]) tm.assert_index_equal(result, expected) # verify set_levels and set_codes result = MultiIndex( levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels( [[np.nan, 's', pd.NaT, 128, None]]) tm.assert_index_equal(result, expected) result = MultiIndex( levels=[[np.nan, 's', pd.NaT, 128, None]], codes=[[1, 2, 2, 2, 2, 2]]).set_codes( [[0, -1, 1, 2, 3, 4]]) tm.assert_index_equal(result, expected) def test_labels_deprecated(idx): # GH23752 with tm.assert_produces_warning(FutureWarning): MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], labels=[[0, 1, 2, 3]], names=['first']) with tm.assert_produces_warning(FutureWarning): idx.labels def test_copy_in_constructor(): levels = np.array(["a", "b", "c"]) codes = np.array([1, 1, 2, 0, 0, 1, 1]) val = codes[0] mi = MultiIndex(levels=[levels, levels], codes=[codes, codes], copy=True) assert mi.codes[0][0] == val codes[0] = 15 assert mi.codes[0][0] == val val = levels[0] levels[0] = "PANDA" assert mi.levels[0][0] == val # ---------------------------------------------------------------------------- # from_arrays # ---------------------------------------------------------------------------- def test_from_arrays(idx): arrays = [np.asarray(lev).take(level_codes) for lev, level_codes in zip(idx.levels, idx.codes)] # list of arrays as input result = MultiIndex.from_arrays(arrays, names=idx.names) tm.assert_index_equal(result, idx) # infer correctly result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']]) assert result.levels[0].equals(Index([Timestamp('20130101')])) assert result.levels[1].equals(Index(['a', 'b'])) def test_from_arrays_iterator(idx): # GH 18434 arrays = [np.asarray(lev).take(level_codes) for lev, level_codes in zip(idx.levels, idx.codes)] # iterator as input result = MultiIndex.from_arrays(iter(arrays), names=idx.names) tm.assert_index_equal(result, idx) # invalid iterator input msg = "Input must be a list / sequence of array-likes." 
with pytest.raises(TypeError, match=msg): MultiIndex.from_arrays(0) def test_from_arrays_tuples(idx): arrays = tuple(tuple(np.asarray(lev).take(level_codes)) for lev, level_codes in zip(idx.levels, idx.codes)) # tuple of tuples as input result = MultiIndex.from_arrays(arrays, names=idx.names) tm.assert_index_equal(result, idx) def test_from_arrays_index_series_datetimetz(): idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3, tz='US/Eastern') idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3, tz='Asia/Tokyo') result = pd.MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) tm.assert_index_equal(result, result2) def test_from_arrays_index_series_timedelta(): idx1 = pd.timedelta_range('1 days', freq='D', periods=3) idx2 = pd.timedelta_range('2 hours', freq='H', periods=3) result = pd.MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) tm.assert_index_equal(result, result2) def test_from_arrays_index_series_period(): idx1 = pd.period_range('2011-01-01', freq='D', periods=3) idx2 = pd.period_range('2015-01-01', freq='H', periods=3) result = pd.MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) tm.assert_index_equal(result, result2) def test_from_arrays_index_datetimelike_mixed(): idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3, tz='US/Eastern') idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3) idx3 = pd.timedelta_range('1 days', freq='D', periods=3) idx4 = pd.period_range('2011-01-01', freq='D', periods=3) result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) tm.assert_index_equal(result.get_level_values(2), idx3) tm.assert_index_equal(result.get_level_values(3), idx4) result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2), pd.Series(idx3), pd.Series(idx4)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) tm.assert_index_equal(result2.get_level_values(2), idx3) tm.assert_index_equal(result2.get_level_values(3), idx4) tm.assert_index_equal(result, result2) def test_from_arrays_index_series_categorical(): # GH13743 idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=False) idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=True) result = pd.MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) result3 
= pd.MultiIndex.from_arrays([idx1.values, idx2.values]) tm.assert_index_equal(result3.get_level_values(0), idx1) tm.assert_index_equal(result3.get_level_values(1), idx2) def test_from_arrays_empty(): # 0 levels msg = "Must pass non-zero number of levels/codes" with pytest.raises(ValueError, match=msg): MultiIndex.from_arrays(arrays=[]) # 1 level result = MultiIndex.from_arrays(arrays=[[]], names=['A']) assert isinstance(result, MultiIndex) expected = Index([], name='A') tm.assert_index_equal(result.levels[0], expected) # N levels for N in [2, 3]: arrays = [[]] * N names = list('ABC')[:N] result = MultiIndex.from_arrays(arrays=arrays, names=names) expected = MultiIndex(levels=[[]] * N, codes=[[]] * N, names=names) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('invalid_sequence_of_arrays', [ 1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'], (1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'), [(1,), 2], [1, (2,)], [('a',), 'b'], ((1,), 2), (1, (2,)), (('a',), 'b') ]) def test_from_arrays_invalid_input(invalid_sequence_of_arrays): msg = "Input must be a list / sequence of array-likes" with pytest.raises(TypeError, match=msg): MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays) @pytest.mark.parametrize('idx1, idx2', [ ([1, 2, 3], ['a', 'b']), ([], ['a', 'b']), ([1, 2, 3], []) ]) def test_from_arrays_different_lengths(idx1, idx2): # see gh-13599 msg = '^all arrays must be same length$' with pytest.raises(ValueError, match=msg): MultiIndex.from_arrays([idx1, idx2]) # ---------------------------------------------------------------------------- # from_tuples # ---------------------------------------------------------------------------- def test_from_tuples(): msg = 'Cannot infer number of levels from empty list' with pytest.raises(TypeError, match=msg): MultiIndex.from_tuples([]) expected = MultiIndex(levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=['a', 'b']) # input tuples result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b']) tm.assert_index_equal(result, expected) def test_from_tuples_iterator(): # GH 18434 # input iterator for tuples expected = MultiIndex(levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=['a', 'b']) result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b']) tm.assert_index_equal(result, expected) # input non-iterables msg = 'Input must be a list / sequence of tuple-likes.' 
with pytest.raises(TypeError, match=msg): MultiIndex.from_tuples(0) def test_from_tuples_empty(): # GH 16777 result = MultiIndex.from_tuples([], names=['a', 'b']) expected = MultiIndex.from_arrays(arrays=[[], []], names=['a', 'b']) tm.assert_index_equal(result, expected) def test_from_tuples_index_values(idx): result = MultiIndex.from_tuples(idx) assert (result.values == idx.values).all() def test_tuples_with_name_string(): # GH 15110 and GH 14848 li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] msg = "Names should be list-like for a MultiIndex" with pytest.raises(ValueError, match=msg): pd.Index(li, name='abc') with pytest.raises(ValueError, match=msg): pd.Index(li, name='a') def test_from_tuples_with_tuple_label(): # GH 15457 expected = pd.DataFrame([[2, 1, 2], [4, (1, 2), 3]], columns=['a', 'b', 'c']).set_index(['a', 'b']) idx = pd.MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=('a', 'b')) result = pd.DataFrame([2, 3], columns=['c'], index=idx) tm.assert_frame_equal(expected, result) # ---------------------------------------------------------------------------- # from_product # ---------------------------------------------------------------------------- def test_from_product_empty_zero_levels(): # 0 levels msg = "Must pass non-zero number of levels/codes" with pytest.raises(ValueError, match=msg): MultiIndex.from_product([]) def test_from_product_empty_one_level(): result = MultiIndex.from_product([[]], names=['A']) expected = pd.Index([], name='A') tm.assert_index_equal(result.levels[0], expected) @pytest.mark.parametrize('first, second', [ ([], []), (['foo', 'bar', 'baz'], []), ([], ['a', 'b', 'c']), ]) def test_from_product_empty_two_levels(first, second): names = ['A', 'B'] result = MultiIndex.from_product([first, second], names=names) expected = MultiIndex(levels=[first, second], codes=[[], []], names=names) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('N', list(range(4))) def test_from_product_empty_three_levels(N): # GH12258 names = ['A', 'B', 'C'] lvl2 = list(range(N)) result = MultiIndex.from_product([[], lvl2, []], names=names) expected = MultiIndex(levels=[[], lvl2, []], codes=[[], [], []], names=names) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('invalid_input', [ 1, [1], [1, 2], [[1], 2], 'a', ['a'], ['a', 'b'], [['a'], 'b'], ]) def test_from_product_invalid_input(invalid_input): msg = (r"Input must be a list / sequence of iterables|" "Input must be list-like") with pytest.raises(TypeError, match=msg): MultiIndex.from_product(iterables=invalid_input) def test_from_product_datetimeindex(): dt_index = date_range('2000-01-01', periods=2) mi = pd.MultiIndex.from_product([[1, 2], dt_index]) etalon = construct_1d_object_array_from_listlike([ (1, pd.Timestamp('2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp('2000-01-01')), (2, pd.Timestamp('2000-01-02')), ]) tm.assert_numpy_array_equal(mi.values, etalon) @pytest.mark.parametrize('ordered', [False, True]) @pytest.mark.parametrize('f', [ lambda x: x, lambda x: pd.Series(x), lambda x: x.values ]) def test_from_product_index_series_categorical(ordered, f): # GH13743 first = ['foo', 'bar'] idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=ordered) expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"), categories=list("bac"), ordered=ordered) result = pd.MultiIndex.from_product([first, f(idx)]) tm.assert_index_equal(result.get_level_values(1), expected) def test_from_product(): first = ['foo', 'bar', 'buz'] second = ['a', 'b', 'c'] names = ['first', 
'second'] result = MultiIndex.from_product([first, second], names=names) tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'), ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'), ('buz', 'c')] expected = MultiIndex.from_tuples(tuples, names=names) tm.assert_index_equal(result, expected) def test_from_product_iterator(): # GH 18434 first = ['foo', 'bar', 'buz'] second = ['a', 'b', 'c'] names = ['first', 'second'] tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'), ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'), ('buz', 'c')] expected = MultiIndex.from_tuples(tuples, names=names) # iterator as input result = MultiIndex.from_product(iter([first, second]), names=names) tm.assert_index_equal(result, expected) # Invalid non-iterable input msg = "Input must be a list / sequence of iterables." with pytest.raises(TypeError, match=msg): MultiIndex.from_product(0) def test_create_index_existing_name(idx): # GH11193, when an existing index is passed, and a new name is not # specified, the new index should inherit the previous object name index = idx index.names = ['foo', 'bar'] result = pd.Index(index) expected = Index( Index([ ('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], dtype='object' ), names=['foo', 'bar'] ) tm.assert_index_equal(result, expected) result = pd.Index(index, names=['A', 'B']) expected = Index( Index([ ('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], dtype='object' ), names=['A', 'B'] ) tm.assert_index_equal(result, expected) # ---------------------------------------------------------------------------- # from_frame # ---------------------------------------------------------------------------- def test_from_frame(): # GH 22420 df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']], columns=['L1', 'L2']) expected = pd.MultiIndex.from_tuples([('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')], names=['L1', 'L2']) result = pd.MultiIndex.from_frame(df) tm.assert_index_equal(expected, result) @pytest.mark.parametrize('non_frame', [ pd.Series([1, 2, 3, 4]), [1, 2, 3, 4], [[1, 2], [3, 4], [5, 6]], pd.Index([1, 2, 3, 4]), np.array([[1, 2], [3, 4], [5, 6]]), 27 ]) def test_from_frame_error(non_frame): # GH 22420 with pytest.raises(TypeError, match='Input must be a DataFrame'): pd.MultiIndex.from_frame(non_frame) def test_from_frame_dtype_fidelity(): # GH 22420 df = pd.DataFrame(OrderedDict([ ('dates', pd.date_range('19910905', periods=6, tz='US/Eastern')), ('a', [1, 1, 1, 2, 2, 2]), ('b', pd.Categorical(['a', 'a', 'b', 'b', 'c', 'c'], ordered=True)), ('c', ['x', 'x', 'y', 'z', 'x', 'y']) ])) original_dtypes = df.dtypes.to_dict() expected_mi = pd.MultiIndex.from_arrays([ pd.date_range('19910905', periods=6, tz='US/Eastern'), [1, 1, 1, 2, 2, 2], pd.Categorical(['a', 'a', 'b', 'b', 'c', 'c'], ordered=True), ['x', 'x', 'y', 'z', 'x', 'y'] ], names=['dates', 'a', 'b', 'c']) mi = pd.MultiIndex.from_frame(df) mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} tm.assert_index_equal(expected_mi, mi) assert original_dtypes == mi_dtypes @pytest.mark.parametrize('names_in,names_out', [ (None, [('L1', 'x'), ('L2', 'y')]), (['x', 'y'], ['x', 'y']), ]) def test_from_frame_valid_names(names_in, names_out): # GH 22420 df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']], columns=pd.MultiIndex.from_tuples([('L1', 'x'), ('L2', 'y')])) mi = pd.MultiIndex.from_frame(df, names=names_in) assert mi.names == names_out 
@pytest.mark.parametrize('names,expected_error_msg', [
    ('bad_input', "Names should be list-like for a MultiIndex"),
    (['a', 'b', 'c'],
     "Length of names must match number of levels in MultiIndex")
])
def test_from_frame_invalid_names(names, expected_error_msg):
    # GH 22420
    df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']],
                      columns=pd.MultiIndex.from_tuples([('L1', 'x'),
                                                         ('L2', 'y')]))
    with pytest.raises(ValueError, match=expected_error_msg):
        pd.MultiIndex.from_frame(df, names=names)
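As an editorial illustration of the constructor paths these tests exercise (from_arrays, from_tuples, from_product and from_frame), here is a minimal, self-contained sketch with made-up data; it assumes a pandas version recent enough to provide MultiIndex.from_frame (0.24+), as the GH 22420 tests above do.

# Minimal sketch of the four MultiIndex constructor paths covered by the
# tests above; all four build the same two-level index (made-up data).
import pandas as pd

arrays = [['a', 'a', 'b', 'b'], [1, 2, 1, 2]]
tuples = [('a', 1), ('a', 2), ('b', 1), ('b', 2)]

mi_arrays = pd.MultiIndex.from_arrays(arrays, names=['letter', 'number'])
mi_tuples = pd.MultiIndex.from_tuples(tuples, names=['letter', 'number'])
mi_product = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
                                        names=['letter', 'number'])
mi_frame = pd.MultiIndex.from_frame(
    pd.DataFrame(tuples, columns=['letter', 'number']))

# All construction paths agree on the resulting index.
assert mi_arrays.equals(mi_tuples)
assert mi_arrays.equals(mi_product)
assert mi_arrays.equals(mi_frame)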
bsd-3-clause
shahankhatch/scikit-learn
examples/applications/plot_stock_market.py
227
8284
""" ======================================= Visualizing the stock market structure ======================================= This example employs several unsupervised learning techniques to extract the stock market structure from variations in historical quotes. The quantity that we use is the daily variation in quote price: quotes that are linked tend to cofluctuate during a day. .. _stock_market: Learning a graph structure -------------------------- We use sparse inverse covariance estimation to find which quotes are correlated conditionally on the others. Specifically, sparse inverse covariance gives us a graph, that is a list of connection. For each symbol, the symbols that it is connected too are those useful to explain its fluctuations. Clustering ---------- We use clustering to group together quotes that behave similarly. Here, amongst the :ref:`various clustering techniques <clustering>` available in the scikit-learn, we use :ref:`affinity_propagation` as it does not enforce equal-size clusters, and it can choose automatically the number of clusters from the data. Note that this gives us a different indication than the graph, as the graph reflects conditional relations between variables, while the clustering reflects marginal properties: variables clustered together can be considered as having a similar impact at the level of the full stock market. Embedding in 2D space --------------------- For visualization purposes, we need to lay out the different symbols on a 2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D embedding. Visualization ------------- The output of the 3 models are combined in a 2D graph where nodes represents the stocks and edges the: - cluster labels are used to define the color of the nodes - the sparse covariance model is used to display the strength of the edges - the 2D embedding is used to position the nodes in the plan This example has a fair amount of visualization-related code, as visualization is crucial here to display the graph. One of the challenge is to position the labels minimizing overlap. For this we use an heuristic based on the direction of the nearest neighbor along each axis. 
""" print(__doc__) # Author: Gael Varoquaux gael.varoquaux@normalesup.org # License: BSD 3 clause import datetime import numpy as np import matplotlib.pyplot as plt from matplotlib import finance from matplotlib.collections import LineCollection from sklearn import cluster, covariance, manifold ############################################################################### # Retrieve the data from Internet # Choose a time period reasonnably calm (not too long ago so that we get # high-tech firms, and before the 2008 crash) d1 = datetime.datetime(2003, 1, 1) d2 = datetime.datetime(2008, 1, 1) # kraft symbol has now changed from KFT to MDLZ in yahoo symbol_dict = { 'TOT': 'Total', 'XOM': 'Exxon', 'CVX': 'Chevron', 'COP': 'ConocoPhillips', 'VLO': 'Valero Energy', 'MSFT': 'Microsoft', 'IBM': 'IBM', 'TWX': 'Time Warner', 'CMCSA': 'Comcast', 'CVC': 'Cablevision', 'YHOO': 'Yahoo', 'DELL': 'Dell', 'HPQ': 'HP', 'AMZN': 'Amazon', 'TM': 'Toyota', 'CAJ': 'Canon', 'MTU': 'Mitsubishi', 'SNE': 'Sony', 'F': 'Ford', 'HMC': 'Honda', 'NAV': 'Navistar', 'NOC': 'Northrop Grumman', 'BA': 'Boeing', 'KO': 'Coca Cola', 'MMM': '3M', 'MCD': 'Mc Donalds', 'PEP': 'Pepsi', 'MDLZ': 'Kraft Foods', 'K': 'Kellogg', 'UN': 'Unilever', 'MAR': 'Marriott', 'PG': 'Procter Gamble', 'CL': 'Colgate-Palmolive', 'GE': 'General Electrics', 'WFC': 'Wells Fargo', 'JPM': 'JPMorgan Chase', 'AIG': 'AIG', 'AXP': 'American express', 'BAC': 'Bank of America', 'GS': 'Goldman Sachs', 'AAPL': 'Apple', 'SAP': 'SAP', 'CSCO': 'Cisco', 'TXN': 'Texas instruments', 'XRX': 'Xerox', 'LMT': 'Lookheed Martin', 'WMT': 'Wal-Mart', 'WBA': 'Walgreen', 'HD': 'Home Depot', 'GSK': 'GlaxoSmithKline', 'PFE': 'Pfizer', 'SNY': 'Sanofi-Aventis', 'NVS': 'Novartis', 'KMB': 'Kimberly-Clark', 'R': 'Ryder', 'GD': 'General Dynamics', 'RTN': 'Raytheon', 'CVS': 'CVS', 'CAT': 'Caterpillar', 'DD': 'DuPont de Nemours'} symbols, names = np.array(list(symbol_dict.items())).T quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True) for symbol in symbols] open = np.array([q.open for q in quotes]).astype(np.float) close = np.array([q.close for q in quotes]).astype(np.float) # The daily variations of the quotes are what carry most information variation = close - open ############################################################################### # Learn a graphical structure from the correlations edge_model = covariance.GraphLassoCV() # standardize the time series: using correlations rather than covariance # is more efficient for structure recovery X = variation.copy().T X /= X.std(axis=0) edge_model.fit(X) ############################################################################### # Cluster using affinity propagation _, labels = cluster.affinity_propagation(edge_model.covariance_) n_labels = labels.max() for i in range(n_labels + 1): print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i]))) ############################################################################### # Find a low-dimension embedding for visualization: find the best position of # the nodes (the stocks) on a 2D plane # We use a dense eigen_solver to achieve reproducibility (arpack is # initiated with random vectors that we don't control). In addition, we # use a large number of neighbors to capture the large-scale structure. 
node_position_model = manifold.LocallyLinearEmbedding( n_components=2, eigen_solver='dense', n_neighbors=6) embedding = node_position_model.fit_transform(X.T).T ############################################################################### # Visualization plt.figure(1, facecolor='w', figsize=(10, 8)) plt.clf() ax = plt.axes([0., 0., 1., 1.]) plt.axis('off') # Display a graph of the partial correlations partial_correlations = edge_model.precision_.copy() d = 1 / np.sqrt(np.diag(partial_correlations)) partial_correlations *= d partial_correlations *= d[:, np.newaxis] non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02) # Plot the nodes using the coordinates of our embedding plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels, cmap=plt.cm.spectral) # Plot the edges start_idx, end_idx = np.where(non_zero) #a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[embedding[:, start], embedding[:, stop]] for start, stop in zip(start_idx, end_idx)] values = np.abs(partial_correlations[non_zero]) lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, .7 * values.max())) lc.set_array(values) lc.set_linewidths(15 * values) ax.add_collection(lc) # Add a label to each node. The challenge here is that we want to # position the labels to avoid overlap with other labels for index, (name, label, (x, y)) in enumerate( zip(names, labels, embedding.T)): dx = x - embedding[0] dx[index] = 1 dy = y - embedding[1] dy[index] = 1 this_dx = dx[np.argmin(np.abs(dy))] this_dy = dy[np.argmin(np.abs(dx))] if this_dx > 0: horizontalalignment = 'left' x = x + .002 else: horizontalalignment = 'right' x = x - .002 if this_dy > 0: verticalalignment = 'bottom' y = y + .002 else: verticalalignment = 'top' y = y - .002 plt.text(x, y, name, size=10, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment, bbox=dict(facecolor='w', edgecolor=plt.cm.spectral(label / float(n_labels)), alpha=.6)) plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(), embedding[0].max() + .10 * embedding[0].ptp(),) plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(), embedding[1].max() + .03 * embedding[1].ptp()) plt.show()
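The Yahoo! Finance download used above is no longer available, so as a hedged illustration of the same pipeline (sparse inverse covariance, affinity propagation, 2D manifold embedding), here is a small self-contained sketch on synthetic factor-driven returns. The synthetic data and its block structure are invented for illustration, and GraphLassoCV matches the scikit-learn version this example targets (it was later renamed GraphicalLassoCV).

# Self-contained sketch of the pipeline above on synthetic returns instead
# of downloaded quotes (made-up data, for illustration only).
import numpy as np
from sklearn import cluster, covariance, manifold

rng = np.random.RandomState(42)
n_days, n_stocks = 300, 12

# Two latent market factors drive two blocks of correlated "stocks".
factors = rng.randn(n_days, 2)
loadings = np.zeros((2, n_stocks))
loadings[0, :6] = 1.0
loadings[1, 6:] = 1.0
variation = factors.dot(loadings) + 0.3 * rng.randn(n_days, n_stocks)

# Standardize the series, then learn the sparse graph structure.
X = variation / variation.std(axis=0)
edge_model = covariance.GraphLassoCV()
edge_model.fit(X)

# Cluster on the learned covariance and embed the nodes in 2D.
_, labels = cluster.affinity_propagation(edge_model.covariance_)
embedding = manifold.LocallyLinearEmbedding(
    n_components=2, eigen_solver='dense', n_neighbors=6).fit_transform(X.T)

print("cluster labels:", labels)
print("embedding shape:", embedding.shape)  # (n_stocks, 2)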
bsd-3-clause
beepee14/scikit-learn
examples/applications/plot_tomography_l1_reconstruction.py
204
5442
""" ====================================================================== Compressive sensing: tomography reconstruction with L1 prior (Lasso) ====================================================================== This example shows the reconstruction of an image from a set of parallel projections, acquired along different angles. Such a dataset is acquired in **computed tomography** (CT). Without any prior information on the sample, the number of projections required to reconstruct the image is of the order of the linear size ``l`` of the image (in pixels). For simplicity we consider here a sparse image, where only pixels on the boundary of objects have a non-zero value. Such data could correspond for example to a cellular material. Note however that most images are sparse in a different basis, such as the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is necessary to use prior information available on the sample (its sparsity): this is an example of **compressive sensing**. The tomography projection operation is a linear transformation. In addition to the data-fidelity term corresponding to a linear regression, we penalize the L1 norm of the image to account for its sparsity. The resulting optimization problem is called the :ref:`lasso`. We use the class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent algorithm. Importantly, this implementation is more computationally efficient on a sparse matrix, than the projection operator used here. The reconstruction with L1 penalization gives a result with zero error (all pixels are successfully labeled with 0 or 1), even if noise was added to the projections. In comparison, an L2 penalization (:class:`sklearn.linear_model.Ridge`) produces a large number of labeling errors for the pixels. Important artifacts are observed on the reconstructed image, contrary to the L1 penalization. Note in particular the circular artifact separating the pixels in the corners, that have contributed to fewer projections than the central disk. """ print(__doc__) # Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org> # License: BSD 3 clause import numpy as np from scipy import sparse from scipy import ndimage from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge import matplotlib.pyplot as plt def _weights(x, dx=1, orig=0): x = np.ravel(x) floor_x = np.floor((x - orig) / dx) alpha = (x - orig - floor_x * dx) / dx return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha)) def _generate_center_coordinates(l_x): X, Y = np.mgrid[:l_x, :l_x] center = l_x / 2. X += 0.5 - center Y += 0.5 - center return X, Y def build_projection_operator(l_x, n_dir): """ Compute the tomography design matrix. Parameters ---------- l_x : int linear size of image array n_dir : int number of angles at which projections are acquired. 
Returns ------- p : sparse matrix of shape (n_dir l_x, l_x**2) """ X, Y = _generate_center_coordinates(l_x) angles = np.linspace(0, np.pi, n_dir, endpoint=False) data_inds, weights, camera_inds = [], [], [] data_unravel_indices = np.arange(l_x ** 2) data_unravel_indices = np.hstack((data_unravel_indices, data_unravel_indices)) for i, angle in enumerate(angles): Xrot = np.cos(angle) * X - np.sin(angle) * Y inds, w = _weights(Xrot, dx=1, orig=X.min()) mask = np.logical_and(inds >= 0, inds < l_x) weights += list(w[mask]) camera_inds += list(inds[mask] + i * l_x) data_inds += list(data_unravel_indices[mask]) proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds))) return proj_operator def generate_synthetic_data(): """ Synthetic binary data """ rs = np.random.RandomState(0) n_pts = 36. x, y = np.ogrid[0:l, 0:l] mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2 mask = np.zeros((l, l)) points = l * rs.rand(2, n_pts) mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1 mask = ndimage.gaussian_filter(mask, sigma=l / n_pts) res = np.logical_and(mask > mask.mean(), mask_outer) return res - ndimage.binary_erosion(res) # Generate synthetic images, and projections l = 128 proj_operator = build_projection_operator(l, l / 7.) data = generate_synthetic_data() proj = proj_operator * data.ravel()[:, np.newaxis] proj += 0.15 * np.random.randn(*proj.shape) # Reconstruction with L2 (Ridge) penalization rgr_ridge = Ridge(alpha=0.2) rgr_ridge.fit(proj_operator, proj.ravel()) rec_l2 = rgr_ridge.coef_.reshape(l, l) # Reconstruction with L1 (Lasso) penalization # the best value of alpha was determined using cross validation # with LassoCV rgr_lasso = Lasso(alpha=0.001) rgr_lasso.fit(proj_operator, proj.ravel()) rec_l1 = rgr_lasso.coef_.reshape(l, l) plt.figure(figsize=(8, 3.3)) plt.subplot(131) plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest') plt.axis('off') plt.title('original image') plt.subplot(132) plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest') plt.title('L2 penalization') plt.axis('off') plt.subplot(133) plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest') plt.title('L1 penalization') plt.axis('off') plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1) plt.show()
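To make the role of the L1 penalty concrete outside the tomography setting, here is a tiny, self-contained compressive-sensing sketch with invented dimensions: the same Lasso estimator recovers a sparse vector from fewer random linear measurements than unknowns, while Ridge does not.

# Tiny 1D compressive-sensing sketch: recover a sparse vector from fewer
# random linear measurements than unknowns (made-up sizes and noise level).
import numpy as np
from sklearn.linear_model import Lasso, Ridge

rng = np.random.RandomState(0)
n_features, n_measurements, n_nonzero = 100, 40, 5

# Ground truth: mostly zeros, a few spikes.
w_true = np.zeros(n_features)
w_true[rng.choice(n_features, n_nonzero, replace=False)] = rng.randn(n_nonzero)

A = rng.randn(n_measurements, n_features)        # measurement operator
y = A.dot(w_true) + 0.01 * rng.randn(n_measurements)

w_l1 = Lasso(alpha=0.01).fit(A, y).coef_
w_l2 = Ridge(alpha=0.2).fit(A, y).coef_

print("L1 reconstruction error:", np.linalg.norm(w_l1 - w_true))
print("L2 reconstruction error:", np.linalg.norm(w_l2 - w_true))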
bsd-3-clause
idlead/scikit-learn
sklearn/decomposition/tests/test_dict_learning.py
67
9084
import numpy as np from sklearn.utils import check_array from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import TempMemmap from sklearn.decomposition import DictionaryLearning from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.decomposition import SparseCoder from sklearn.decomposition import dict_learning_online from sklearn.decomposition import sparse_encode rng_global = np.random.RandomState(0) n_samples, n_features = 10, 8 X = rng_global.randn(n_samples, n_features) def test_dict_learning_shapes(): n_components = 5 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_reconstruction(): n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) # used to test lars here too, but there's no guarantee the number of # nonzero atoms is right. def test_dict_learning_reconstruction_parallel(): # regression test that parallel reconstruction works with n_jobs=-1 n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) def test_dict_learning_lassocd_readonly_data(): n_components = 12 with TempMemmap(X) as X_read_only: dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X_read_only).transform(X_read_only) assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2) def test_dict_learning_nonzero_coefs(): n_components = 4 dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[np.newaxis, 1]) assert_true(len(np.flatnonzero(code)) == 3) dico.set_params(transform_algorithm='omp') code = dico.transform(X[np.newaxis, 1]) assert_equal(len(np.flatnonzero(code)), 3) def test_dict_learning_unknown_fit_algorithm(): n_components = 5 dico = DictionaryLearning(n_components, fit_algorithm='<unknown>') assert_raises(ValueError, dico.fit, X) def test_dict_learning_split(): n_components = 5 dico = DictionaryLearning(n_components, transform_algorithm='threshold', random_state=0) code = dico.fit(X).transform(X) dico.split_sign = True split_code = dico.transform(X) assert_array_equal(split_code[:, :n_components] - split_code[:, n_components:], code) def test_dict_learning_online_shapes(): rng = np.random.RandomState(0) n_components = 8 code, dictionary = dict_learning_online(X, 
n_components=n_components, alpha=1, random_state=rng) assert_equal(code.shape, (n_samples, n_components)) assert_equal(dictionary.shape, (n_components, n_features)) assert_equal(np.dot(code, dictionary).shape, X.shape) def test_dict_learning_online_verbosity(): n_components = 5 # test verbosity from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1, random_state=0) dico.fit(X) dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2, random_state=0) dico.fit(X) dict_learning_online(X, n_components=n_components, alpha=1, verbose=1, random_state=0) dict_learning_online(X, n_components=n_components, alpha=1, verbose=2, random_state=0) finally: sys.stdout = old_stdout assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_initialization(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) dico = MiniBatchDictionaryLearning(n_components, n_iter=0, dict_init=V, random_state=0).fit(X) assert_array_equal(dico.components_, V) def test_dict_learning_online_partial_fit(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X), batch_size=1, alpha=1, shuffle=False, dict_init=V, random_state=0).fit(X) dict2 = MiniBatchDictionaryLearning(n_components, alpha=1, n_iter=1, dict_init=V, random_state=0) for i in range(10): for sample in X: dict2.partial_fit(sample[np.newaxis, :]) assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)) assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) def test_sparse_encode_shapes(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): code = sparse_encode(X, V, algorithm=algo) assert_equal(code.shape, (n_samples, n_components)) def test_sparse_encode_input(): n_components = 100 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] Xf = check_array(X, order='F') for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): a = sparse_encode(X, V, algorithm=algo) b = sparse_encode(Xf, V, algorithm=algo) assert_array_almost_equal(a, b) def test_sparse_encode_error(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) def test_sparse_encode_error_default_sparsity(): rng = np.random.RandomState(0) X = rng.randn(100, 64) D = rng.randn(2, 64) code = ignore_warnings(sparse_encode)(X, D, algorithm='omp', n_nonzero_coefs=None) assert_equal(code.shape, (100, 2)) def 
test_unknown_method(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>") def test_sparse_coder_estimator(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
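As a usage-level companion to these tests, the following sketch (random data, parameter values chosen only for illustration) shows the two estimators they exercise most: DictionaryLearning to learn an overcomplete dictionary and SparseCoder to reuse its atoms on new data.

# Usage sketch of the estimators exercised above: learn an overcomplete
# dictionary, then sparse-code new samples with the learned atoms.
import numpy as np
from sklearn.decomposition import DictionaryLearning, SparseCoder

rng = np.random.RandomState(0)
X = rng.randn(30, 8)

dico = DictionaryLearning(n_components=12, transform_algorithm='omp',
                          transform_alpha=0.001, random_state=0)
code = dico.fit_transform(X)                  # sparse codes, shape (30, 12)
reconstruction = np.dot(code, dico.components_)

# The learned atoms can also be reused through a stand-alone SparseCoder.
coder = SparseCoder(dictionary=dico.components_,
                    transform_algorithm='lasso_lars', transform_alpha=0.001)
new_code = coder.transform(rng.randn(5, 8))
print(code.shape, new_code.shape)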
bsd-3-clause
wesm/arrow
python/pyarrow/tests/test_ipc.py
4
29983
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from collections import UserList import io import pathlib import pytest import socket import threading import weakref import numpy as np import pyarrow as pa from pyarrow.tests.util import changed_environ try: from pandas.testing import assert_frame_equal, assert_series_equal import pandas as pd except ImportError: pass class IpcFixture: write_stats = None def __init__(self, sink_factory=lambda: io.BytesIO()): self._sink_factory = sink_factory self.sink = self.get_sink() def get_sink(self): return self._sink_factory() def get_source(self): return self.sink.getvalue() def write_batches(self, num_batches=5, as_table=False): nrows = 5 schema = pa.schema([('one', pa.float64()), ('two', pa.utf8())]) writer = self._get_writer(self.sink, schema) batches = [] for i in range(num_batches): batch = pa.record_batch( [np.random.randn(nrows), ['foo', None, 'bar', 'bazbaz', 'qux']], schema=schema) batches.append(batch) if as_table: table = pa.Table.from_batches(batches) writer.write_table(table) else: for batch in batches: writer.write_batch(batch) self.write_stats = writer.stats writer.close() return batches class FileFormatFixture(IpcFixture): def _get_writer(self, sink, schema): return pa.ipc.new_file(sink, schema) def _check_roundtrip(self, as_table=False): batches = self.write_batches(as_table=as_table) file_contents = pa.BufferReader(self.get_source()) reader = pa.ipc.open_file(file_contents) assert reader.num_record_batches == len(batches) for i, batch in enumerate(batches): # it works. 
Must convert back to DataFrame batch = reader.get_batch(i) assert batches[i].equals(batch) assert reader.schema.equals(batches[0].schema) assert isinstance(reader.stats, pa.ipc.ReadStats) assert isinstance(self.write_stats, pa.ipc.WriteStats) assert tuple(reader.stats) == tuple(self.write_stats) class StreamFormatFixture(IpcFixture): # ARROW-6474, for testing writing old IPC protocol with 4-byte prefix use_legacy_ipc_format = False # ARROW-9395, for testing writing old metadata version options = None def _get_writer(self, sink, schema): return pa.ipc.new_stream( sink, schema, use_legacy_format=self.use_legacy_ipc_format, options=self.options, ) class MessageFixture(IpcFixture): def _get_writer(self, sink, schema): return pa.RecordBatchStreamWriter(sink, schema) @pytest.fixture def ipc_fixture(): return IpcFixture() @pytest.fixture def file_fixture(): return FileFormatFixture() @pytest.fixture def stream_fixture(): return StreamFormatFixture() def test_empty_file(): buf = b'' with pytest.raises(pa.ArrowInvalid): pa.ipc.open_file(pa.BufferReader(buf)) def test_file_simple_roundtrip(file_fixture): file_fixture._check_roundtrip(as_table=False) def test_file_write_table(file_fixture): file_fixture._check_roundtrip(as_table=True) @pytest.mark.parametrize("sink_factory", [ lambda: io.BytesIO(), lambda: pa.BufferOutputStream() ]) def test_file_read_all(sink_factory): fixture = FileFormatFixture(sink_factory) batches = fixture.write_batches() file_contents = pa.BufferReader(fixture.get_source()) reader = pa.ipc.open_file(file_contents) result = reader.read_all() expected = pa.Table.from_batches(batches) assert result.equals(expected) def test_open_file_from_buffer(file_fixture): # ARROW-2859; APIs accept the buffer protocol file_fixture.write_batches() source = file_fixture.get_source() reader1 = pa.ipc.open_file(source) reader2 = pa.ipc.open_file(pa.BufferReader(source)) reader3 = pa.RecordBatchFileReader(source) result1 = reader1.read_all() result2 = reader2.read_all() result3 = reader3.read_all() assert result1.equals(result2) assert result1.equals(result3) st1 = reader1.stats assert st1.num_messages == 6 assert st1.num_record_batches == 5 assert reader2.stats == st1 assert reader3.stats == st1 @pytest.mark.pandas def test_file_read_pandas(file_fixture): frames = [batch.to_pandas() for batch in file_fixture.write_batches()] file_contents = pa.BufferReader(file_fixture.get_source()) reader = pa.ipc.open_file(file_contents) result = reader.read_pandas() expected = pd.concat(frames).reset_index(drop=True) assert_frame_equal(result, expected) def test_file_pathlib(file_fixture, tmpdir): file_fixture.write_batches() source = file_fixture.get_source() path = tmpdir.join('file.arrow').strpath with open(path, 'wb') as f: f.write(source) t1 = pa.ipc.open_file(pathlib.Path(path)).read_all() t2 = pa.ipc.open_file(pa.OSFile(path)).read_all() assert t1.equals(t2) def test_empty_stream(): buf = io.BytesIO(b'') with pytest.raises(pa.ArrowInvalid): pa.ipc.open_stream(buf) @pytest.mark.pandas def test_stream_categorical_roundtrip(stream_fixture): df = pd.DataFrame({ 'one': np.random.randn(5), 'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'], categories=['foo', 'bar'], ordered=True) }) batch = pa.RecordBatch.from_pandas(df) with stream_fixture._get_writer(stream_fixture.sink, batch.schema) as wr: wr.write_batch(batch) table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source())) .read_all()) assert_frame_equal(table.to_pandas(), df) def test_open_stream_from_buffer(stream_fixture): # 
ARROW-2859 stream_fixture.write_batches() source = stream_fixture.get_source() reader1 = pa.ipc.open_stream(source) reader2 = pa.ipc.open_stream(pa.BufferReader(source)) reader3 = pa.RecordBatchStreamReader(source) result1 = reader1.read_all() result2 = reader2.read_all() result3 = reader3.read_all() assert result1.equals(result2) assert result1.equals(result3) st1 = reader1.stats assert st1.num_messages == 6 assert st1.num_record_batches == 5 assert reader2.stats == st1 assert reader3.stats == st1 assert tuple(st1) == tuple(stream_fixture.write_stats) @pytest.mark.pandas def test_stream_write_dispatch(stream_fixture): # ARROW-1616 df = pd.DataFrame({ 'one': np.random.randn(5), 'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'], categories=['foo', 'bar'], ordered=True) }) table = pa.Table.from_pandas(df, preserve_index=False) batch = pa.RecordBatch.from_pandas(df, preserve_index=False) with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr: wr.write(table) wr.write(batch) table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source())) .read_all()) assert_frame_equal(table.to_pandas(), pd.concat([df, df], ignore_index=True)) @pytest.mark.pandas def test_stream_write_table_batches(stream_fixture): # ARROW-504 df = pd.DataFrame({ 'one': np.random.randn(20), }) b1 = pa.RecordBatch.from_pandas(df[:10], preserve_index=False) b2 = pa.RecordBatch.from_pandas(df, preserve_index=False) table = pa.Table.from_batches([b1, b2, b1]) with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr: wr.write_table(table, max_chunksize=15) batches = list(pa.ipc.open_stream(stream_fixture.get_source())) assert list(map(len, batches)) == [10, 15, 5, 10] result_table = pa.Table.from_batches(batches) assert_frame_equal(result_table.to_pandas(), pd.concat([df[:10], df, df[:10]], ignore_index=True)) @pytest.mark.parametrize('use_legacy_ipc_format', [False, True]) def test_stream_simple_roundtrip(stream_fixture, use_legacy_ipc_format): stream_fixture.use_legacy_ipc_format = use_legacy_ipc_format batches = stream_fixture.write_batches() file_contents = pa.BufferReader(stream_fixture.get_source()) reader = pa.ipc.open_stream(file_contents) assert reader.schema.equals(batches[0].schema) total = 0 for i, next_batch in enumerate(reader): assert next_batch.equals(batches[i]) total += 1 assert total == len(batches) with pytest.raises(StopIteration): reader.read_next_batch() def test_write_options(): options = pa.ipc.IpcWriteOptions() assert options.allow_64bit is False assert options.use_legacy_format is False assert options.metadata_version == pa.ipc.MetadataVersion.V5 options.allow_64bit = True assert options.allow_64bit is True options.use_legacy_format = True assert options.use_legacy_format is True options.metadata_version = pa.ipc.MetadataVersion.V4 assert options.metadata_version == pa.ipc.MetadataVersion.V4 for value in ('V5', 42): with pytest.raises((TypeError, ValueError)): options.metadata_version = value assert options.compression is None for value in ['lz4', 'zstd']: options.compression = value assert options.compression == value options.compression = value.upper() assert options.compression == value options.compression = None assert options.compression is None assert options.use_threads is True options.use_threads = False assert options.use_threads is False options = pa.ipc.IpcWriteOptions( metadata_version=pa.ipc.MetadataVersion.V4, allow_64bit=True, use_legacy_format=True, compression='lz4', use_threads=False) assert options.metadata_version == 
pa.ipc.MetadataVersion.V4 assert options.allow_64bit is True assert options.use_legacy_format is True assert options.compression == 'lz4' assert options.use_threads is False def test_write_options_legacy_exclusive(stream_fixture): with pytest.raises( ValueError, match="provide at most one of options and use_legacy_format"): stream_fixture.use_legacy_ipc_format = True stream_fixture.options = pa.ipc.IpcWriteOptions() stream_fixture.write_batches() @pytest.mark.parametrize('options', [ pa.ipc.IpcWriteOptions(), pa.ipc.IpcWriteOptions(allow_64bit=True), pa.ipc.IpcWriteOptions(use_legacy_format=True), pa.ipc.IpcWriteOptions(metadata_version=pa.ipc.MetadataVersion.V4), pa.ipc.IpcWriteOptions(use_legacy_format=True, metadata_version=pa.ipc.MetadataVersion.V4), ]) def test_stream_options_roundtrip(stream_fixture, options): stream_fixture.use_legacy_ipc_format = None stream_fixture.options = options batches = stream_fixture.write_batches() file_contents = pa.BufferReader(stream_fixture.get_source()) message = pa.ipc.read_message(stream_fixture.get_source()) assert message.metadata_version == options.metadata_version reader = pa.ipc.open_stream(file_contents) assert reader.schema.equals(batches[0].schema) total = 0 for i, next_batch in enumerate(reader): assert next_batch.equals(batches[i]) total += 1 assert total == len(batches) with pytest.raises(StopIteration): reader.read_next_batch() def test_dictionary_delta(stream_fixture): ty = pa.dictionary(pa.int8(), pa.utf8()) data = [["foo", "foo", None], ["foo", "bar", "foo"], # potential delta ["foo", "bar"], ["foo", None, "bar", "quux"], # potential delta ["bar", "quux"], # replacement ] batches = [ pa.RecordBatch.from_arrays([pa.array(v, type=ty)], names=['dicts']) for v in data] schema = batches[0].schema def write_batches(): with stream_fixture._get_writer(pa.MockOutputStream(), schema) as writer: for batch in batches: writer.write_batch(batch) return writer.stats st = write_batches() assert st.num_record_batches == 5 assert st.num_dictionary_batches == 4 assert st.num_replaced_dictionaries == 3 assert st.num_dictionary_deltas == 0 stream_fixture.use_legacy_ipc_format = None stream_fixture.options = pa.ipc.IpcWriteOptions( emit_dictionary_deltas=True) st = write_batches() assert st.num_record_batches == 5 assert st.num_dictionary_batches == 4 assert st.num_replaced_dictionaries == 1 assert st.num_dictionary_deltas == 2 def test_envvar_set_legacy_ipc_format(): schema = pa.schema([pa.field('foo', pa.int32())]) writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema) assert not writer._use_legacy_format assert writer._metadata_version == pa.ipc.MetadataVersion.V5 writer = pa.ipc.new_file(pa.BufferOutputStream(), schema) assert not writer._use_legacy_format assert writer._metadata_version == pa.ipc.MetadataVersion.V5 with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'): writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema) assert writer._use_legacy_format assert writer._metadata_version == pa.ipc.MetadataVersion.V5 writer = pa.ipc.new_file(pa.BufferOutputStream(), schema) assert writer._use_legacy_format assert writer._metadata_version == pa.ipc.MetadataVersion.V5 with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'): writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema) assert not writer._use_legacy_format assert writer._metadata_version == pa.ipc.MetadataVersion.V4 writer = pa.ipc.new_file(pa.BufferOutputStream(), schema) assert not writer._use_legacy_format assert writer._metadata_version == pa.ipc.MetadataVersion.V4 with 
changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'): with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'): writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema) assert writer._use_legacy_format assert writer._metadata_version == pa.ipc.MetadataVersion.V4 writer = pa.ipc.new_file(pa.BufferOutputStream(), schema) assert writer._use_legacy_format assert writer._metadata_version == pa.ipc.MetadataVersion.V4 def test_stream_read_all(stream_fixture): batches = stream_fixture.write_batches() file_contents = pa.BufferReader(stream_fixture.get_source()) reader = pa.ipc.open_stream(file_contents) result = reader.read_all() expected = pa.Table.from_batches(batches) assert result.equals(expected) @pytest.mark.pandas def test_stream_read_pandas(stream_fixture): frames = [batch.to_pandas() for batch in stream_fixture.write_batches()] file_contents = stream_fixture.get_source() reader = pa.ipc.open_stream(file_contents) result = reader.read_pandas() expected = pd.concat(frames).reset_index(drop=True) assert_frame_equal(result, expected) @pytest.fixture def example_messages(stream_fixture): batches = stream_fixture.write_batches() file_contents = stream_fixture.get_source() buf_reader = pa.BufferReader(file_contents) reader = pa.MessageReader.open_stream(buf_reader) return batches, list(reader) def test_message_ctors_no_segfault(): with pytest.raises(TypeError): repr(pa.Message()) with pytest.raises(TypeError): repr(pa.MessageReader()) def test_message_reader(example_messages): _, messages = example_messages assert len(messages) == 6 assert messages[0].type == 'schema' assert isinstance(messages[0].metadata, pa.Buffer) assert isinstance(messages[0].body, pa.Buffer) assert messages[0].metadata_version == pa.MetadataVersion.V5 for msg in messages[1:]: assert msg.type == 'record batch' assert isinstance(msg.metadata, pa.Buffer) assert isinstance(msg.body, pa.Buffer) assert msg.metadata_version == pa.MetadataVersion.V5 def test_message_serialize_read_message(example_messages): _, messages = example_messages msg = messages[0] buf = msg.serialize() reader = pa.BufferReader(buf.to_pybytes() * 2) restored = pa.ipc.read_message(buf) restored2 = pa.ipc.read_message(reader) restored3 = pa.ipc.read_message(buf.to_pybytes()) restored4 = pa.ipc.read_message(reader) assert msg.equals(restored) assert msg.equals(restored2) assert msg.equals(restored3) assert msg.equals(restored4) with pytest.raises(pa.ArrowInvalid, match="Corrupted message"): pa.ipc.read_message(pa.BufferReader(b'ab')) with pytest.raises(EOFError): pa.ipc.read_message(reader) def test_message_read_from_compressed(example_messages): # Part of ARROW-5910 _, messages = example_messages for message in messages: raw_out = pa.BufferOutputStream() with pa.output_stream(raw_out, compression='gzip') as compressed_out: message.serialize_to(compressed_out) compressed_buf = raw_out.getvalue() result = pa.ipc.read_message(pa.input_stream(compressed_buf, compression='gzip')) assert result.equals(message) def test_message_read_record_batch(example_messages): batches, messages = example_messages for batch, message in zip(batches, messages[1:]): read_batch = pa.ipc.read_record_batch(message, batch.schema) assert read_batch.equals(batch) def test_read_record_batch_on_stream_error_message(): # ARROW-5374 batch = pa.record_batch([pa.array([b"foo"], type=pa.utf8())], names=['strs']) stream = pa.BufferOutputStream() with pa.ipc.new_stream(stream, batch.schema) as writer: writer.write_batch(batch) buf = stream.getvalue() with pytest.raises(IOError, match="type 
record batch but got schema"): pa.ipc.read_record_batch(buf, batch.schema) # ---------------------------------------------------------------------- # Socket streaming testa class StreamReaderServer(threading.Thread): def init(self, do_read_all): self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._sock.bind(('127.0.0.1', 0)) self._sock.listen(1) host, port = self._sock.getsockname() self._do_read_all = do_read_all self._schema = None self._batches = [] self._table = None return port def run(self): connection, client_address = self._sock.accept() try: source = connection.makefile(mode='rb') reader = pa.ipc.open_stream(source) self._schema = reader.schema if self._do_read_all: self._table = reader.read_all() else: for i, batch in enumerate(reader): self._batches.append(batch) finally: connection.close() def get_result(self): return(self._schema, self._table if self._do_read_all else self._batches) class SocketStreamFixture(IpcFixture): def __init__(self): # XXX(wesm): test will decide when to start socket server. This should # probably be refactored pass def start_server(self, do_read_all): self._server = StreamReaderServer() port = self._server.init(do_read_all) self._server.start() self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._sock.connect(('127.0.0.1', port)) self.sink = self.get_sink() def stop_and_get_result(self): import struct self.sink.write(struct.pack('Q', 0)) self.sink.flush() self._sock.close() self._server.join() return self._server.get_result() def get_sink(self): return self._sock.makefile(mode='wb') def _get_writer(self, sink, schema): return pa.RecordBatchStreamWriter(sink, schema) @pytest.fixture def socket_fixture(): return SocketStreamFixture() def test_socket_simple_roundtrip(socket_fixture): socket_fixture.start_server(do_read_all=False) writer_batches = socket_fixture.write_batches() reader_schema, reader_batches = socket_fixture.stop_and_get_result() assert reader_schema.equals(writer_batches[0].schema) assert len(reader_batches) == len(writer_batches) for i, batch in enumerate(writer_batches): assert reader_batches[i].equals(batch) def test_socket_read_all(socket_fixture): socket_fixture.start_server(do_read_all=True) writer_batches = socket_fixture.write_batches() _, result = socket_fixture.stop_and_get_result() expected = pa.Table.from_batches(writer_batches) assert result.equals(expected) # ---------------------------------------------------------------------- # Miscellaneous IPC tests @pytest.mark.pandas def test_ipc_file_stream_has_eos(): # ARROW-5395 df = pd.DataFrame({'foo': [1.5]}) batch = pa.RecordBatch.from_pandas(df) sink = pa.BufferOutputStream() write_file(batch, sink) buffer = sink.getvalue() # skip the file magic reader = pa.ipc.open_stream(buffer[8:]) # will fail if encounters footer data instead of eos rdf = reader.read_pandas() assert_frame_equal(df, rdf) @pytest.mark.pandas def test_ipc_zero_copy_numpy(): df = pd.DataFrame({'foo': [1.5]}) batch = pa.RecordBatch.from_pandas(df) sink = pa.BufferOutputStream() write_file(batch, sink) buffer = sink.getvalue() reader = pa.BufferReader(buffer) batches = read_file(reader) data = batches[0].to_pandas() rdf = pd.DataFrame(data) assert_frame_equal(df, rdf) def test_ipc_stream_no_batches(): # ARROW-2307 table = pa.Table.from_arrays([pa.array([1, 2, 3, 4]), pa.array(['foo', 'bar', 'baz', 'qux'])], names=['a', 'b']) sink = pa.BufferOutputStream() with pa.ipc.new_stream(sink, table.schema): pass source = sink.getvalue() with pa.ipc.open_stream(source) as reader: result = 
reader.read_all() assert result.schema.equals(table.schema) assert len(result) == 0 @pytest.mark.pandas def test_get_record_batch_size(): N = 10 itemsize = 8 df = pd.DataFrame({'foo': np.random.randn(N)}) batch = pa.RecordBatch.from_pandas(df) assert pa.ipc.get_record_batch_size(batch) > (N * itemsize) @pytest.mark.pandas def _check_serialize_pandas_round_trip(df, use_threads=False): buf = pa.serialize_pandas(df, nthreads=2 if use_threads else 1) result = pa.deserialize_pandas(buf, use_threads=use_threads) assert_frame_equal(result, df) @pytest.mark.pandas def test_pandas_serialize_round_trip(): index = pd.Index([1, 2, 3], name='my_index') columns = ['foo', 'bar'] df = pd.DataFrame( {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')}, index=index, columns=columns ) _check_serialize_pandas_round_trip(df) @pytest.mark.pandas def test_pandas_serialize_round_trip_nthreads(): index = pd.Index([1, 2, 3], name='my_index') columns = ['foo', 'bar'] df = pd.DataFrame( {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')}, index=index, columns=columns ) _check_serialize_pandas_round_trip(df, use_threads=True) @pytest.mark.pandas def test_pandas_serialize_round_trip_multi_index(): index1 = pd.Index([1, 2, 3], name='level_1') index2 = pd.Index(list('def'), name=None) index = pd.MultiIndex.from_arrays([index1, index2]) columns = ['foo', 'bar'] df = pd.DataFrame( {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')}, index=index, columns=columns, ) _check_serialize_pandas_round_trip(df) @pytest.mark.pandas def test_serialize_pandas_empty_dataframe(): df = pd.DataFrame() _check_serialize_pandas_round_trip(df) @pytest.mark.pandas def test_pandas_serialize_round_trip_not_string_columns(): df = pd.DataFrame(list(zip([1.5, 1.6, 1.7], 'abc'))) buf = pa.serialize_pandas(df) result = pa.deserialize_pandas(buf) assert_frame_equal(result, df) @pytest.mark.pandas def test_serialize_pandas_no_preserve_index(): df = pd.DataFrame({'a': [1, 2, 3]}, index=[1, 2, 3]) expected = pd.DataFrame({'a': [1, 2, 3]}) buf = pa.serialize_pandas(df, preserve_index=False) result = pa.deserialize_pandas(buf) assert_frame_equal(result, expected) buf = pa.serialize_pandas(df, preserve_index=True) result = pa.deserialize_pandas(buf) assert_frame_equal(result, df) @pytest.mark.pandas @pytest.mark.filterwarnings("ignore:'pyarrow:FutureWarning") def test_serialize_with_pandas_objects(): df = pd.DataFrame({'a': [1, 2, 3]}, index=[1, 2, 3]) s = pd.Series([1, 2, 3, 4]) data = { 'a_series': df['a'], 'a_frame': df, 's_series': s } serialized = pa.serialize(data).to_buffer() deserialized = pa.deserialize(serialized) assert_frame_equal(deserialized['a_frame'], df) assert_series_equal(deserialized['a_series'], df['a']) assert deserialized['a_series'].name == 'a' assert_series_equal(deserialized['s_series'], s) assert deserialized['s_series'].name is None @pytest.mark.pandas def test_schema_batch_serialize_methods(): nrows = 5 df = pd.DataFrame({ 'one': np.random.randn(nrows), 'two': ['foo', np.nan, 'bar', 'bazbaz', 'qux']}) batch = pa.RecordBatch.from_pandas(df) s_schema = batch.schema.serialize() s_batch = batch.serialize() recons_schema = pa.ipc.read_schema(s_schema) recons_batch = pa.ipc.read_record_batch(s_batch, recons_schema) assert recons_batch.equals(batch) def test_schema_serialization_with_metadata(): field_metadata = {b'foo': b'bar', b'kind': b'field'} schema_metadata = {b'foo': b'bar', b'kind': b'schema'} f0 = pa.field('a', pa.int8()) f1 = pa.field('b', pa.string(), metadata=field_metadata) schema = pa.schema([f0, f1], metadata=schema_metadata) s_schema = 
schema.serialize() recons_schema = pa.ipc.read_schema(s_schema) assert recons_schema.equals(schema) assert recons_schema.metadata == schema_metadata assert recons_schema[0].metadata is None assert recons_schema[1].metadata == field_metadata def test_deprecated_pyarrow_ns_apis(): table = pa.table([pa.array([1, 2, 3, 4])], names=['a']) sink = pa.BufferOutputStream() with pa.ipc.new_stream(sink, table.schema) as writer: writer.write(table) with pytest.warns(FutureWarning, match="please use pyarrow.ipc.open_stream"): pa.open_stream(sink.getvalue()) sink = pa.BufferOutputStream() with pa.ipc.new_file(sink, table.schema) as writer: writer.write(table) with pytest.warns(FutureWarning, match="please use pyarrow.ipc.open_file"): pa.open_file(sink.getvalue()) def write_file(batch, sink): with pa.ipc.new_file(sink, batch.schema) as writer: writer.write_batch(batch) def read_file(source): with pa.ipc.open_file(source) as reader: return [reader.get_batch(i) for i in range(reader.num_record_batches)] def test_write_empty_ipc_file(): # ARROW-3894: IPC file was not being properly initialized when no record # batches are being written schema = pa.schema([('field', pa.int64())]) sink = pa.BufferOutputStream() with pa.ipc.new_file(sink, schema): pass buf = sink.getvalue() with pa.RecordBatchFileReader(pa.BufferReader(buf)) as reader: table = reader.read_all() assert len(table) == 0 assert table.schema.equals(schema) def test_py_record_batch_reader(): def make_schema(): return pa.schema([('field', pa.int64())]) def make_batches(): schema = make_schema() batch1 = pa.record_batch([[1, 2, 3]], schema=schema) batch2 = pa.record_batch([[4, 5]], schema=schema) return [batch1, batch2] # With iterable batches = UserList(make_batches()) # weakrefable wr = weakref.ref(batches) with pa.ipc.RecordBatchReader.from_batches(make_schema(), batches) as reader: batches = None assert wr() is not None assert list(reader) == make_batches() assert wr() is None # With iterator batches = iter(UserList(make_batches())) # weakrefable wr = weakref.ref(batches) with pa.ipc.RecordBatchReader.from_batches(make_schema(), batches) as reader: batches = None assert wr() is not None assert list(reader) == make_batches() assert wr() is None
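Most of the tests above revolve around one round trip: write record batches with the IPC stream format, read them back, compare. A minimal sketch of that round trip with a made-up schema, using only APIs that already appear in the tests themselves:

# Minimal IPC stream round trip: write two record batches to an in-memory
# sink, then read them back as a table (made-up schema and values).
import pyarrow as pa

schema = pa.schema([('one', pa.float64()), ('two', pa.utf8())])
batch = pa.record_batch([[1.0, 2.0, 3.0], ['a', None, 'c']], schema=schema)

sink = pa.BufferOutputStream()
with pa.ipc.new_stream(sink, schema) as writer:
    writer.write_batch(batch)
    writer.write_batch(batch)

buf = sink.getvalue()
with pa.ipc.open_stream(buf) as reader:
    table = reader.read_all()

assert table.num_rows == 6
assert table.schema.equals(schema)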
apache-2.0
themrmax/scikit-learn
sklearn/decomposition/truncated_svd.py
13
8301
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). """ # Author: Lars Buitinck # Olivier Grisel <olivier.grisel@ensta.org> # Michael Becker <mike@beckerfuffle.com> # License: 3-clause BSD. import numpy as np import scipy.sparse as sp from scipy.sparse.linalg import svds from ..base import BaseEstimator, TransformerMixin from ..utils import check_array, check_random_state from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip from ..utils.sparsefuncs import mean_variance_axis __all__ = ["TruncatedSVD"] class TruncatedSVD(BaseEstimator, TransformerMixin): """Dimensionality reduction using truncated SVD (aka LSA). This transformer performs linear dimensionality reduction by means of truncated singular value decomposition (SVD). Contrary to PCA, this estimator does not center the data before computing the singular value decomposition. This means it can work with scipy.sparse matrices efficiently. In particular, truncated SVD works on term count/tf-idf matrices as returned by the vectorizers in sklearn.feature_extraction.text. In that context, it is known as latent semantic analysis (LSA). This estimator supports two algorithms: a fast randomized SVD solver, and a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or (X.T * X), whichever is more efficient. Read more in the :ref:`User Guide <LSA>`. Parameters ---------- n_components : int, default = 2 Desired dimensionality of output data. Must be strictly less than the number of features. The default value is useful for visualisation. For LSA, a value of 100 is recommended. algorithm : string, default = "randomized" SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy (scipy.sparse.linalg.svds), or "randomized" for the randomized algorithm due to Halko (2009). n_iter : int, optional (default 5) Number of iterations for randomized SVD solver. Not used by ARPACK. The default is larger than the default in `randomized_svd` to handle sparse matrices that may have large slowly decaying spectrum. random_state : int, RandomState instance or None, optional, default = None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. tol : float, optional Tolerance for ARPACK. 0 means machine precision. Ignored by randomized SVD solver. Attributes ---------- components_ : array, shape (n_components, n_features) explained_variance_ : array, shape (n_components,) The variance of the training samples transformed by a projection to each component. explained_variance_ratio_ : array, shape (n_components,) Percentage of variance explained by each of the selected components. singular_values_ : array, shape (n_components,) The singular values corresponding to each of the selected components. The singular values are equal to the 2-norms of the ``n_components`` variables in the lower-dimensional space. Examples -------- >>> from sklearn.decomposition import TruncatedSVD >>> from sklearn.random_projection import sparse_random_matrix >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42) >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42) >>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7, random_state=42, tol=0.0) >>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.0606... 0.0584... 0.0497... 0.0434... 0.0372...] 
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS 0.249... >>> print(svd.singular_values_) # doctest: +ELLIPSIS [ 2.5841... 2.5245... 2.3201... 2.1753... 2.0443...] See also -------- PCA RandomizedPCA References ---------- Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061 Notes ----- SVD suffers from a problem called "sign indeterminancy", which means the sign of the ``components_`` and the output from transform depend on the algorithm and random state. To work around this, fit instances of this class to data once, then keep the instance around to do transformations. """ def __init__(self, n_components=2, algorithm="randomized", n_iter=5, random_state=None, tol=0.): self.algorithm = algorithm self.n_components = n_components self.n_iter = n_iter self.random_state = random_state self.tol = tol def fit(self, X, y=None): """Fit LSI model on training data X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- self : object Returns the transformer object. """ self.fit_transform(X) return self def fit_transform(self, X, y=None): """Fit LSI model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = check_array(X, accept_sparse=['csr', 'csc']) random_state = check_random_state(self.random_state) if self.algorithm == "arpack": U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol) # svds doesn't abide by scipy.linalg.svd/randomized_svd # conventions, so reverse its outputs. Sigma = Sigma[::-1] U, VT = svd_flip(U[:, ::-1], VT[::-1]) elif self.algorithm == "randomized": k = self.n_components n_features = X.shape[1] if k >= n_features: raise ValueError("n_components must be < n_features;" " got %d >= %d" % (k, n_features)) U, Sigma, VT = randomized_svd(X, self.n_components, n_iter=self.n_iter, random_state=random_state) else: raise ValueError("unknown algorithm %r" % self.algorithm) self.components_ = VT # Calculate explained variance & explained variance ratio X_transformed = U * Sigma self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) if sp.issparse(X): _, full_var = mean_variance_axis(X, axis=0) full_var = full_var.sum() else: full_var = np.var(X, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var self.singular_values_ = Sigma # Store the singular values. return X_transformed def transform(self, X): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = check_array(X, accept_sparse='csr') return safe_sparse_dot(X, self.components_.T) def inverse_transform(self, X): """Transform X back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data. Returns ------- X_original : array, shape (n_samples, n_features) Note that this is always a dense array. """ X = check_array(X) return np.dot(X, self.components_)
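As a small usage sketch of the LSA setting the docstring describes, reducing a sparse tf-idf term-document matrix with TruncatedSVD might look like the following; the toy documents are invented for illustration.

# LSA-style usage sketch: reduce a sparse tf-idf matrix with TruncatedSVD.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD

docs = ["the cat sat on the mat",
        "the dog sat on the log",
        "cats and dogs are pets",
        "trucks and cars are vehicles"]

tfidf = TfidfVectorizer().fit_transform(docs)     # sparse (4, n_terms)
svd = TruncatedSVD(n_components=2, random_state=0)
topics = svd.fit_transform(tfidf)                 # dense (4, 2)

print(topics.shape)
print(svd.explained_variance_ratio_)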
bsd-3-clause
DamCB/tyssue
tyssue/particles/point_cloud.py
2
10119
"""Utilities to generate point clouds The positions of the points are generated along the architecture of the epithelium. """ from collections import abc import numpy as np import pandas as pd from ..config.subdiv import bulk_spec class EdgeSubdiv: """ Container class to ease discretisation along the edges """ def __init__(self, edge_df, **kwargs): """Creates an indexer and an offset array to ease discretisation along the edges. Parameters ---------- edge_df: pd.DataFrame, Keyword parameters ------------------ density: number of points per edge Attributes ---------- upcaster: np.ndarray, shape (Np,) edge indices repeated to match the lookup table offset: np.ndarray, shape (Np,) piecewise linear offset along the edges, such that ::math:M_{ij}^n = offset[n]*r_{ij}: """ self.edge_df = edge_df.copy() self.n_edges = self.edge_df.shape[0] self.specs = bulk_spec() self.specs.update(**kwargs) self.unit_length = edge_df.length.mean() if "density" not in edge_df: self.edge_df["density"] = self.specs["density"] self.n_points = 0 self.points = None self.offset_lut = None self.update_all() def update_all(self): self.update_offset_lut() self.update_particles() self.update_upcaster() self.update_offset() @staticmethod def from_eptm_edges(eptm, **kwargs): """Creates an EdgeSubdiv instance and computes the point grid allong the edges from the source vertex to its target. Returns ------- subdiv: a :class:`EdgeSubdiv` instance """ subdiv = EdgeSubdiv(eptm.edge_df[["length", "density"]], **kwargs) srce_pos = eptm.upcast_srce(eptm.vert_df[eptm.coords]) r_ij = eptm.edge_df[eptm.dcoords] subdiv.edge_point_cloud(srce_pos, r_ij) return subdiv @staticmethod def _offset_lut_(num): return np.arange(0.5, num + 0.5) / num def update_offset_lut(self, offset_lut=None): """ Updates the density lookup table function. 
The `offset_lut` can be any function with a single `num` argument Parameters ---------- offset_lut: function, default None, edge-wise function of the number of points giving the offset positions Default is a shifted regular grid: `np.arange(0.5, num+0.5) / num` """ if offset_lut is None: self.offset_lut = self._offset_lut_ else: self.offset_lut = offset_lut def update_particles(self): """ * Updates the number of particles per edge from edges length and density values: `num_particles = length * density` * Also updates the self.points df """ self.edge_df["norm_length"] = self.edge_df["length"] / self.unit_length points_per_edges = np.round(self.edge_df.eval("norm_length * density")).astype( np.int ) self.edge_df["num_particles"] = points_per_edges self.n_points = points_per_edges.sum() self.points = pd.DataFrame( np.zeros((self.n_points, 2)), columns=["upcaster", "offset"] ) def update_upcaster(self): """ resets the 'upcaster' column of self.points, 'upcaster' indexes over self.edge_df repeated to upcast data from the edge df to the points df """ self.points["upcaster"] = np.repeat( np.arange(self.edge_df.shape[0]), self.edge_df["num_particles"] ) def update_offset(self): self.points["offset"] = np.concatenate( [self.offset_lut(num=ns) for ns in self.edge_df["num_particles"]] ) def validate(self): if not self.points["upcaster"].max() + 1 == self.n_edges: return False if not self.points["upcaster"].shape[0] == self.n_points: return False if not self.points["offset"].shape[0] == self.n_points: return False return True def upcast(self, df): if isinstance(df, str) and df in self.edge_df: return self.edge_df.loc[self.points["upcaster"], df] elif ( isinstance(df, abc.Iterable) and isinstance(df[0], str) and set(df).issubset(self.edge_df.columns) ): return self.edge_df.loc[self.points["upcaster"], df] elif hasattr(df, "loc"): return df.loc[self.points["upcaster"]] else: raise ValueError( """ Argument df should be a column name or a sequence of column names or a Series or Dataframe indexed like self.edge_df """ ) def edge_point_cloud( self, srce_pos, r_ij, offset_modulation=None, modulation_kwargs=None, coords=["x", "y", "z"], dcoords=["dx", "dy", "dz"], ): """Generates a point cloud along the edges of the epithelium. if an offset_modulation function is provided, it is used to transform the offsets Parameters ---------- srce_pos: DataFrame of shape (self.Ne, ndim) with the origins of the points for each edge (usually the edge upcasted source vertex) r_ij: DataFrame of shape (self.Ne, ndim) the edge vector coordinates offset_modulation: function of self returning an array with shape (self.Np,) containing the modified offsets. self.points['offset'] is used by default.
modulation_kwargs: keyword arguments to the modulation function Returns ------- points: (Np, 3) pd.DataFrame with the points positions """ for u, du in zip(coords, dcoords): self.edge_df[u] = srce_pos[u] self.edge_df["d" + u] = r_ij[du] cols = coords + dcoords upcast = self.edge_df.loc[self.points["upcaster"], cols] if offset_modulation is None: upcast["offset"] = self.points["offset"].values else: upcast["offset"] = offset_modulation(self, **modulation_kwargs) for c in coords: self.points[c] = upcast.eval("{} + offset * {}".format(c, "d" + c)).values if self.specs["noise"] > 0.0: self.points[coords] += np.random.normal( scale=self.specs["noise"], size=(self.n_points, 3) ) return self.points[coords] def get_edge_bases(eptm, base=("face", "srce", "trgt")): edge_upcast_pos = { element: eptm.upcast_cols(element, eptm.coords) for element in base } origin = base[0] edge_bases = {} for vertex in base[1:]: df = pd.DataFrame( 0, columns=eptm.coords + eptm.dcoords + ["length"], index=eptm.edge_df.index ) df[eptm.dcoords] = ( edge_upcast_pos[vertex].values - edge_upcast_pos[origin].values ) df["length"] = np.linalg.norm(df[eptm.dcoords].values, axis=1) df[eptm.coords] = edge_upcast_pos[origin].values edge_bases["{}_{}".format(origin, vertex)] = df.copy() return edge_bases class FaceGrid: def __init__(self, edges_df, base, **kwargs): self.origin = base[0] self.base = ["{}_{}".format(base[0], other) for other in base[1:]] self.specs = bulk_spec() self.specs.update(kwargs) e_specs = kwargs self.subdivs = {key: EdgeSubdiv(edges_df[key], **e_specs) for key in self.base} self.n_points = np.product( [ subdiv.edge_df["num_particles"].values for subdiv in self.subdivs.values() ], axis=0, ).sum() self.dim = len(self.subdivs) self.up_cols = ["up_{}".format(key) for key in self.base] self.of_cols = ["of_{}".format(key) for key in self.base] self.points = None def update_grid(self): upcasters = {} for key, subdiv in self.subdivs.items(): upcasters["up_" + key] = subdiv.points["upcaster"] upcasters["of_" + key] = subdiv.points["offset"] upcasters = pd.DataFrame.from_dict(upcasters) points = {} u_axis = "up_{}".format(self.base[0]) upcasters.set_index(u_axis, drop=False, inplace=True) for cols in (self.of_cols, self.up_cols): df = upcasters.groupby(level=u_axis).apply(_local_grid, *cols) points.update({col: df[col].values for col in cols}) self.points = pd.DataFrame.from_dict(points) def face_point_cloud(self, coords=["x", "y", "z"], dcoords=["dx", "dy", "dz"]): upcast = {} offsets = self.points[self.of_cols] for key, subdiv in self.subdivs.items(): upcast[key] = subdiv.edge_df.loc[ self.points["up_{}".format(key)], coords + dcoords + ["length"] ].copy() upcast[key].reset_index(inplace=True) upcast[key]["offset"] = offsets["of_{}".format(key)].values for u, du in zip(coords, dcoords): self.points[u] = upcast[self.base[0]].eval( "{} + offset * {}".format(u, du) ).values + np.sum( [ upcast[other].eval("offset * {}".format(du)).values for other in self.base[1:] ], axis=0, ) in_out = np.zeros(self.points.shape[0], dtype=bool) for other in self.base[1:]: xx = upcast[self.base[0]]["offset"].values yy = upcast[other]["offset"].values in_out += (xx + yy) < 1.0 self.points = self.points[in_out] return self.points[coords] def _local_grid(df, *cols): grid = np.meshgrid(*(df[col] for col in cols)) out = pd.DataFrame.from_dict({col: mm.ravel() for col, mm in zip(cols, grid)}) return out
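# To make the upcaster/offset bookkeeping concrete, here is a self-contained NumPy
# sketch (an addition, deliberately not using the tyssue API) that builds a point cloud
# the same way EdgeSubdiv does: repeat each edge index once per particle, lay a shifted
# regular grid of offsets along each edge, and combine source positions with edge vectors.
if __name__ == "__main__":
    import numpy as np

    # Per-edge data: source vertex positions, edge vectors and particles per edge.
    srce = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0]])      # (n_edges, 3)
    r_ij = np.array([[1.0, 0.0, 0.0],
                     [0.0, 2.0, 0.0]])      # (n_edges, 3)
    num_particles = np.array([2, 4])

    # 'upcaster': edge index repeated once per point, as in update_upcaster().
    upcaster = np.repeat(np.arange(len(num_particles)), num_particles)
    # 'offset': shifted regular grid along each edge, same rule as _offset_lut_().
    offset = np.concatenate([np.arange(0.5, n + 0.5) / n for n in num_particles])

    # Point cloud: edge origin plus the fractional offset along the edge vector.
    points = srce[upcaster] + offset[:, None] * r_ij[upcaster]
    print(points.shape)  # (6, 3)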
gpl-3.0
GPflow/GPflow
doc/source/notebooks/advanced/gps_for_big_data.pct.py
1
8027
# --- # jupyter: # jupytext: # formats: ipynb,.pct.py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.3.3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # Stochastic Variational Inference for scalability with SVGP # %% [markdown] # One of the main criticisms of Gaussian processes is their scalability to large datasets. In this notebook, we illustrate how to use the state-of-the-art Stochastic Variational Gaussian Process (SVGP) (*Hensman, et. al. 2013*) to overcome this problem. # %% # %matplotlib inline import itertools import numpy as np import time import gpflow import tensorflow as tf import matplotlib.pyplot as plt from gpflow.ci_utils import ci_niter plt.style.use("ggplot") # for reproducibility of this notebook: rng = np.random.RandomState(123) tf.random.set_seed(42) # %% [markdown] # ## Generating data # For this notebook example, we generate 10,000 noisy observations from a test function: # \begin{equation} # f(x) = \sin(3\pi x) + 0.3\cos(9\pi x) + \frac{\sin(7 \pi x)}{2} # \end{equation} # %% def func(x): return np.sin(x * 3 * 3.14) + 0.3 * np.cos(x * 9 * 3.14) + 0.5 * np.sin(x * 7 * 3.14) N = 10000 # Number of training observations X = rng.rand(N, 1) * 2 - 1 # X values Y = func(X) + 0.2 * rng.randn(N, 1) # Noisy Y values data = (X, Y) # %% [markdown] # We plot the data along with the noiseless generating function: # %% plt.plot(X, Y, "x", alpha=0.2) Xt = np.linspace(-1.1, 1.1, 1000)[:, None] Yt = func(Xt) _ = plt.plot(Xt, Yt, c="k") # %% [markdown] # ## Building the model # The main idea behind SVGP is to approximate the true GP posterior with a GP conditioned on a small set of "inducing" values. This smaller set can be thought of as summarizing the larger dataset. For this example, we will select a set of 50 inducing locations that are initialized from the training dataset: # %% M = 50 # Number of inducing locations kernel = gpflow.kernels.SquaredExponential() Z = X[:M, :].copy() # Initialize inducing locations to the first M inputs in the dataset m = gpflow.models.SVGP(kernel, gpflow.likelihoods.Gaussian(), Z, num_data=N) # %% [markdown] # ## Likelihood computation: batch vs. minibatch # First we showcase the model's performance using the whole dataset to compute the ELBO. # %% elbo = tf.function(m.elbo) # %% # TensorFlow re-traces & compiles a `tf.function`-wrapped method at *every* call if the arguments are numpy arrays instead of tf.Tensors. Hence: tensor_data = tuple(map(tf.convert_to_tensor, data)) elbo(tensor_data) # run it once to trace & compile # %% # %%timeit elbo(tensor_data) # %% [markdown] # We can speed up this calculation by using minibatches of the data. For this example, we use minibatches of size 100. # %% minibatch_size = 100 train_dataset = tf.data.Dataset.from_tensor_slices((X, Y)).repeat().shuffle(N) train_iter = iter(train_dataset.batch(minibatch_size)) ground_truth = elbo(tensor_data).numpy() # %% # %%timeit elbo(next(train_iter)) # %% [markdown] # ### Stochastical estimation of ELBO # The minibatch estimate should be an unbiased estimator of the `ground_truth`. Here we show a histogram of the value from different evaluations, together with its mean and the ground truth. The small difference between the mean of the minibatch estimations and the ground truth shows that the minibatch estimator is working as expected. 
# %% evals = [elbo(minibatch).numpy() for minibatch in itertools.islice(train_iter, 100)] # %% plt.hist(evals, label="Minibatch estimations") plt.axvline(ground_truth, c="k", label="Ground truth") plt.axvline(np.mean(evals), c="g", ls="--", label="Minibatch mean") plt.legend() plt.title("Histogram of ELBO evaluations using minibatches") print("Discrepancy between ground truth and minibatch estimate:", ground_truth - np.mean(evals)) # %% [markdown] # ### Minibatches speed up computation # The reason for using minibatches is that it decreases the time needed to make an optimization step, because estimating the objective is computationally cheaper with fewer data points. Here we plot the change in time required with the size of the minibatch. We see that smaller minibatches result in a cheaper estimate of the objective. # %% # Evaluate objective for different minibatch sizes minibatch_proportions = np.logspace(-2, 0, 10) times = [] objs = [] for mbp in minibatch_proportions: batchsize = int(N * mbp) train_iter = iter(train_dataset.batch(batchsize)) start_time = time.time() objs.append([elbo(minibatch) for minibatch in itertools.islice(train_iter, 20)]) times.append(time.time() - start_time) # %% f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6)) ax1.plot(minibatch_proportions, times, "x-") ax1.set_xlabel("Minibatch proportion") ax1.set_ylabel("Time taken") ax2.plot(minibatch_proportions, np.array(objs), "kx") ax2.set_xlabel("Minibatch proportion") ax2.set_ylabel("ELBO estimates") # %% [markdown] # ### Running stochastic optimization # %% [markdown] # First we create a utility function that plots the model's predictions: # %% def plot(title=""): plt.figure(figsize=(12, 4)) plt.title(title) pX = np.linspace(-1, 1, 100)[:, None] # Test locations pY, pYv = m.predict_y(pX) # Predict Y values at test locations plt.plot(X, Y, "x", label="Training points", alpha=0.2) (line,) = plt.plot(pX, pY, lw=1.5, label="Mean of predictive posterior") col = line.get_color() plt.fill_between( pX[:, 0], (pY - 2 * pYv ** 0.5)[:, 0], (pY + 2 * pYv ** 0.5)[:, 0], color=col, alpha=0.6, lw=1.5, ) Z = m.inducing_variable.Z.numpy() plt.plot(Z, np.zeros_like(Z), "k|", mew=2, label="Inducing locations") plt.legend(loc="lower right") plot(title="Predictions before training") # %% [markdown] # Now we can train our model. For optimizing the ELBO, we use the Adam Optimizer *(Kingma and Ba 2015)* which is designed for stochastic objective functions. We create a `run_adam` utility function to perform the optimization. # %% minibatch_size = 100 # We turn off training for inducing point locations gpflow.set_trainable(m.inducing_variable, False) def run_adam(model, iterations): """ Utility function running the Adam optimizer :param model: GPflow model :param interations: number of iterations """ # Create an Adam Optimizer action logf = [] train_iter = iter(train_dataset.batch(minibatch_size)) training_loss = model.training_loss_closure(train_iter, compile=True) optimizer = tf.optimizers.Adam() @tf.function def optimization_step(): optimizer.minimize(training_loss, model.trainable_variables) for step in range(iterations): optimization_step() if step % 10 == 0: elbo = -training_loss().numpy() logf.append(elbo) return logf # %% [markdown] # Now we run the optimization loop for 20,000 iterations. # %% maxiter = ci_niter(20000) logf = run_adam(m, maxiter) plt.plot(np.arange(maxiter)[::10], logf) plt.xlabel("iteration") _ = plt.ylabel("ELBO") # %% [markdown] # Finally, we plot the model's predictions. 
# %% plot("Predictions after training") # %% [markdown] # ## Further reading # # Several notebooks expand on this one: # # - [Advanced Sparse GP regression](../advanced/advanced_many_points.ipynb), which goes into deeper detail on sparse Gaussian process methods. # - [Optimization](../advanced/optimisation.ipynb) discussing optimizing GP models. # - [Natural gradients](../advanced/natural_gradients.ipynb) for optimizing SVGP models efficiently. # %% [markdown] # ## References: # Hensman, James, Nicolo Fusi, and Neil D. Lawrence. "Gaussian processes for big data." Uncertainty in Artificial Intelligence (2013). # # Kingma, Diederik P., and Jimmy Ba. "Adam: A method for stochastic optimization." arXiv preprint arXiv:1412.6980 (2014).
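# %% [markdown]
# As a final sanity check (a small addition to the original notebook, reusing only objects defined above), we can re-evaluate the ELBO on the full dataset and compare it with the value computed before training; after the Adam iterations it should be much higher.

# %%
print("ELBO before training: {:.2f}".format(ground_truth))
print("ELBO after training:  {:.2f}".format(elbo(tensor_data).numpy()))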
apache-2.0
trangel/Data-Science
tracking-purchases/src/anomaly_detection.py
1
5283
""" Program that solves the anomaly detection insight problem. """ def main(argv): """ Main routine that runs the anomaly detection algorithm. The procedure followed is: 1) Get command line arguments (if any). 2) Initialize a graph structure for the user network. 3) Initialize a database to keep track of the history of purchases. 4) Parse "batch_log" file. While parsing the purchase-history database and the user-network graph are updated. 5) Parse "stream_log" file. Here the database and graph are updated as in 4). Purchase events are evaluated as anomalous or not. --------------- Optional arguments: batch_log_fname, str, batch_log file name Default "batch_log.json" stream_log_fname, str, stream_log file name Default "stream_log.json" output_fname, str, output file name Default "flagged_purchases.json" ------------ Objects df, pandas dataframe, database of history of purchases. Columns: timestamp, str, time of event. id, str, user ID. amount, str, amount of transaction. """ # Import packages import pandas as pd import timeit # Import user defined functions/objects: from user_network import user_network from parser import parse_log_file # Set debug to True to print out user network and save purchase database to file at the end of execution. debug=False start_time = timeit.default_timer() # Get file names and command line arguments: file_names=get_command_line_arguments(argv) # Create a graph datastructure which contains our user network. # See doc. in user_network.py g = user_network() # Create an empty database to keep the history of purchases columns=["purchase_index","timestamp","id","amount"] df=pd.DataFrame(columns=columns) # Parse the "batch_log.json" file file_type=1; purchase_index=0 df,purchase_index=parse_log_file(df,g,file_names,file_type,purchase_index) # Parse stream_log_file file_type=2 df,purchase_index=parse_log_file(df,g,file_names,file_type,purchase_index) # Print out social network and write database to file for debugging purposes if ( debug ): g.show_social_networks() df.to_csv("df.csv") # Print out exection time: elapsed = timeit.default_timer() - start_time print("Execution time {}".format(elapsed)) def get_full_file_names(batch_log_fname,stream_log_fname,output_fname): """ Define input/output file names with full paths: ------------ Arguments: batch_log_fname, str, name of batch_log file stream_log_fname, str, name of stream_log file output_log_fname, str, name of output file ------------ Side effects: If the "log_output_dirname" is not present, it is created. Input files are always in "./log_input/" Output files are always in "./log_output/" ------------ Output: file_names, dic, contains absolute paths to input and ouput files. 
batch_log_fname: batch_log input file name including full path stream_log_fname: stream_log input file name including full path output_fname: output file name including full path """ from os import path,getcwd,makedirs # Get input and output directory path names: current_dirname=getcwd() log_input_dirname=path.join(current_dirname,"log_input") log_output_dirname=path.join(current_dirname,"log_output") # Define absolute path of input/output files batch_log_fname=path.join(log_input_dirname,batch_log_fname) stream_log_fname=path.join(log_input_dirname,stream_log_fname) output_fname=path.join(log_output_dirname,output_fname) # Create output directory if not present if not path.exists(log_output_dirname): makedirs(log_output_dirname) # Create a "file_names" dictionary with input/output filenames file_names={ 'batch_log_fname':batch_log_fname, 'stream_log_fname':stream_log_fname, 'output_fname':output_fname} return file_names def get_command_line_arguments(argv): """ Gets command line arguments """ import sys, getopt # Set defaults: batch_log_fname = 'batch_log.json' stream_log_fname = 'stream_log.json' output_fname = 'flagged_purchases.json' try: opts, args = getopt.getopt(argv,"hb:s:o:",["help","batch=","stream=","output="]) except getopt.GetoptError: opts = [] for opt, arg in opts: if opt in ('-h', '--help') : print( "anomaly_detection.py -b <batch_log_fname> -s <stream_log_fname> -o <output_fname>") sys.exit() elif opt in ("-b", "--batch"): batch_log_fname = arg elif opt in ("-s", "--stream"): stream_log_fname = arg elif opt in ("-o", "--output"): output_fname = arg print( "Batch log file is {}".format( batch_log_fname)) print( "Stream log file is {}".format( stream_log_fname)) print( "Output file is {}".format( output_fname)) # Include full path into filename and put them into a # dictionary "file_names": file_names=get_full_file_names(batch_log_fname,stream_log_fname,output_fname) return file_names if __name__ == "__main__": import sys main(sys.argv[1:])
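# The actual flagging criterion lives in parser.py and user_network.py, which are not
# shown here. Purely as an illustration of the kind of rule this problem typically uses
# (an assumption about the spec, not necessarily what this repository implements), a
# purchase can be compared against the mean and standard deviation of the most recent
# purchases made within the user's social network:
def is_anomalous(amount, network_purchases, t=50):
    """Illustrative check only: flag `amount` if it exceeds mean + 3*std of the
    last `t` purchases made within a user's social network.

    `network_purchases` is assumed to be a slice of the history DataFrame
    described in main() (columns: purchase_index, timestamp, id, amount),
    already restricted to the user's network.
    """
    recent = network_purchases.sort_values("purchase_index").tail(t)
    if len(recent) < 2:  # not enough history to define a baseline
        return False, None, None
    amounts = recent["amount"].astype(float)
    mean, std = amounts.mean(), amounts.std(ddof=0)
    return amount > mean + 3.0 * std, mean, std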
gpl-3.0
FDPS/FDPS
sample/c++/nbody+sph/magi_data/py/tipsy.py
3
6144
# $ python py/tipsy.py filename [pmax] import sys import numpy as np import matplotlib matplotlib.use("agg") import matplotlib.pyplot as plt # import re import struct def read_position_tipsy(filename): # open the file fin = open(filename, "rb") size = len(fin.read()) fin.seek(0) # read header (28 = 8 + 4 * 5, additional padding of 4 bytes is introduced) little_endian = True time, nbodies, ndim, nsph, ndark, nstar = struct.unpack("<diiiii", fin.read(28)) if (ndim < 1 or ndim > 3): little_endian = False fin.seek(0) time, nbodies, ndim, nsph, ndark, nstar = struct.unpack(">diiiii", fin.read(28)) # remove padding of 4 bytes fin.read(4) # this function reads dark matter particles only # skip gas particles (12 floats = 48 bytes) fin.read(48 * nsph) # arrays for dark matter particles px = [0] * ndark py = [0] * ndark pz = [0] * ndark # read dark matter particles (9 floats = 36 bytes) if ndark > 0: for ii in range(ndark): if little_endian: mass, x, y, z, vx, vy, vz, eps, phi = struct.unpack("<fffffffff", fin.read(36)) else: mass, x, y, z, vx, vy, vz, eps, phi = struct.unpack(">fffffffff", fin.read(36)) px[ii] = x py[ii] = y pz[ii] = z return (px, py, pz) def locate_panels(ax, nx, ny, share_xaxis, share_yaxis): margin = 0.12 if (share_xaxis == False) or (share_yaxis == False): margin = 0.15 xmin, xmax = margin, 1.0 - margin ymin, ymax = margin, 1.0 - margin xbin = (xmax - xmin) / nx ybin = (ymax - ymin) / ny xmargin, ymargin = 0, 0 if share_yaxis == False: xmin = 0.0 xbin = 1.0 / nx xmargin = xbin * margin if share_xaxis == False: ymin = 0.0 ybin = 1.0 / ny ymargin = ybin * margin for ii in range(nx): xl = xmin + ii * xbin + xmargin for jj in range(ny): yl = ymin + jj * ybin + ymargin kk = ii * ny + jj ax[kk] = fig.add_axes((xl, yl, xbin - 2 * xmargin, ybin - 2 * ymargin)) if share_xaxis == True: ax[kk].tick_params(labelbottom = "off") if jj == 0: ax[kk].tick_params(labelbottom = "on") if share_yaxis == True: ax[kk].tick_params(labelleft = "off") if ii == 0: ax[kk].tick_params(labelleft = "on") # obtain input argument(s) argv = sys.argv argc = len(argv) filename = argv[1] set_range = True if argc == 3: set_range = False pmax = float(argv[2]) # embed fonts plt.rcParams['ps.useafm'] = True plt.rcParams['pdf.use14corefonts'] = True plt.rcParams['text.usetex'] = True # set font size plt.rcParams['font.size'] = 14 # set number of panels nxpanel, nypanel = 2, 2 ax = [0] * nxpanel * nypanel # set figure size and its aspect ratio Lx = 6 if nxpanel > 2 * nypanel: Lx *= 2 Ly = (Lx / nxpanel) * nypanel fig = plt.figure(figsize = (Lx, Ly)) # set location of panels locate_panels(ax, nxpanel, nypanel, True, True) # get number of components and particles input = "doc/" + filename + ".summary.txt" fin = open(input, "r") lines = fin.readlines() fin.close() idx = lines[1].find("\t") kind = int(lines[1][0:idx])# number of components # get partition num = [0] * kind head = [0] * kind head[0] = 0 for ii in range(kind): idx = lines[2 + ii].find("\n") num[ii] = int(lines[2 + ii][0:idx]) if ii != 0: head[ii] = head[ii - 1] + num[ii - 1] # read particle data px, py, pz = read_position_tipsy("dat/" + filename + ".tipsy") # set plot range if set_range == True: pmax = max(np.abs(px)) ymax = max(np.abs(py)) zmax = max(np.abs(pz)) if pmax < ymax: pmax = ymax if pmax < zmax: pmax = zmax # sparse sampling if necessary skip = 1 nmax = 1048576 if len(px) > nmax: skip = int(np.ceil(len(px) / nmax)) # set colors of dots col = [0] * kind for ii in range(kind): if ii % 6 == 0: col[ii] = "black" if ii % 6 == 1: col[ii] = "red" if ii % 6 
== 2: col[ii] = "blue" if ii % 6 == 3: col[ii] = "magenta" if ii % 6 == 4: col[ii] = "green" if ii % 6 == 5: col[ii] = "brown" # plot particle distribution for ii in range(nxpanel): for jj in range(nypanel): idx = ii * nypanel + jj if (idx != (nxpanel * nypanel - 1)): # ii = 0, jj = 0: xy-plot # ii = 0, jj = 1: xz-plot # ii = 1, jj = 0: zy-plot # ii = 1, jj = 1: blank if ii == 0: xx = px if ii == 1: xx = pz ax[idx].tick_params(labelleft = False) if jj == 0: yy = py if jj == 1: yy = pz ax[idx].tick_params(labelbottom = False) # plot the data for kk in range(kind): ax[idx].plot(xx[head[kk]:(head[kk]+num[kk]):skip], yy[head[kk]:(head[kk]+num[kk]):skip], ",", color = col[kk]) # set plot range ax[idx].set_xlim([-pmax, pmax]) ax[idx].set_ylim([-pmax, pmax]) # ax[idx].grid() ax[idx].tick_params(axis = "both", direction = "in", color = "black", bottom = "on", top = "on", left = "on", right = "on") # set label if (ii == 0) and (jj == 0): ax[idx].set_xlabel(r"$x$") ax[idx].set_ylabel(r"$y$") if (ii == 0) and (jj == 1): ax[idx].set_ylabel(r"$z$") if (ii == 1) and (jj == 0): ax[idx].set_xlabel(r"$z$") else: # remove box at the upper right corner ax[idx].spines["right"].set_color("none") ax[idx].spines["top"].set_color("none") ax[idx].tick_params(labelbottom = False, labelleft = False, bottom = False, left = False, right = False, top = False) # output the figure plt.savefig("dot.png", format = "png", dpi = 300, bbox_inches = "tight")
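# Since read_position_tipsy only needs the header plus nine floats per dark-matter
# particle, a minimal snapshot can be generated for testing without running MAGI. The
# helper below is an addition (the file name in the usage comment is hypothetical) and
# writes a little-endian file in exactly the layout the reader above parses; it relies
# on the struct and numpy imports already made at the top of this script.
def write_minimal_tipsy(filename, pos, eps = 0.01):
    # little-endian header: time, nbodies, ndim, nsph, ndark, nstar (28 bytes),
    # followed by the same 4 bytes of padding the reader skips
    ndark = len(pos)
    with open(filename, "wb") as fout:
        fout.write(struct.pack("<diiiii", 0.0, ndark, 3, 0, ndark, 0))
        fout.write(b"\x00" * 4)
        # one dark-matter particle = 9 floats (36 bytes):
        # mass, x, y, z, vx, vy, vz, eps, phi; placeholders except the positions
        for x, y, z in pos:
            fout.write(struct.pack("<fffffffff", 1.0 / ndark, x, y, z, 0.0, 0.0, 0.0, eps, 0.0))
# example usage (hypothetical file name):
# write_minimal_tipsy("dat/test.tipsy", np.random.randn(100, 3))
# px, py, pz = read_position_tipsy("dat/test.tipsy")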
mit
walterreade/scikit-learn
examples/linear_model/plot_ols_ridge_variance.py
387
2060
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Ordinary Least Squares and Ridge Regression Variance ========================================================= Due to the few points in each dimension and the straight line that linear regression uses to follow these points as well as it can, noise on the observations will cause great variance as shown in the first plot. Every line's slope can vary quite a bit for each prediction due to the noise induced in the observations. Ridge regression is basically minimizing a penalised version of the least-squared function. The penalising `shrinks` the value of the regression coefficients. Despite the few data points in each dimension, the slope of the prediction is much more stable and the variance in the line itself is greatly reduced, in comparison to that of the standard linear regression """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model X_train = np.c_[.5, 1].T y_train = [.5, 1] X_test = np.c_[0, 2].T np.random.seed(0) classifiers = dict(ols=linear_model.LinearRegression(), ridge=linear_model.Ridge(alpha=.1)) fignum = 1 for name, clf in classifiers.items(): fig = plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.title(name) ax = plt.axes([.12, .12, .8, .8]) for _ in range(6): this_X = .1 * np.random.normal(size=(2, 1)) + X_train clf.fit(this_X, y_train) ax.plot(X_test, clf.predict(X_test), color='.5') ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10) clf.fit(X_train, y_train) ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue') ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10) ax.set_xticks(()) ax.set_yticks(()) ax.set_ylim((0, 1.6)) ax.set_xlabel('X') ax.set_ylabel('y') ax.set_xlim(0, 2) fignum += 1 plt.show()
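# A numerical companion to the plots (an addition that reuses the toy data and the
# `classifiers` dict defined above): refit both estimators on many noisy resamples and
# print the spread of the fitted slope; the ridge penalty should give a clearly smaller
# standard deviation than ordinary least squares.
slopes = {name: [] for name in classifiers}
for name, clf in classifiers.items():
    for _ in range(200):
        this_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)
        slopes[name].append(clf.coef_[0])

for name, values in slopes.items():
    print("%s: slope std = %.3f" % (name, np.std(values)))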
bsd-3-clause
hvanhovell/spark
python/pyspark/sql/dataframe.py
3
94909
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import random if sys.version >= '3': basestring = unicode = str long = int from functools import reduce from html import escape as html_escape else: from itertools import imap as map from cgi import escape as html_escape import warnings from pyspark import copy_func, since, _NoValue from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket, \ ignore_unicode_prefix, PythonEvalType from pyspark.serializers import ArrowCollectSerializer, BatchedSerializer, PickleSerializer, \ UTF8Deserializer from pyspark.storagelevel import StorageLevel from pyspark.traceback_utils import SCCallSiteSync from pyspark.sql.types import _parse_datatype_json_string from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column from pyspark.sql.readwriter import DataFrameWriter from pyspark.sql.streaming import DataStreamWriter from pyspark.sql.types import IntegralType from pyspark.sql.types import * from pyspark.util import _exception_message __all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"] class DataFrame(object): """A distributed collection of data grouped into named columns. A :class:`DataFrame` is equivalent to a relational table in Spark SQL, and can be created using various functions in :class:`SparkSession`:: people = spark.read.parquet("...") Once created, it can be manipulated using the various domain-specific-language (DSL) functions defined in: :class:`DataFrame`, :class:`Column`. To select a column from the data frame, use the apply method:: ageCol = people.age A more concrete example:: # To create DataFrame using SparkSession people = spark.read.parquet("...") department = spark.read.parquet("...") people.filter(people.age > 30).join(department, people.deptId == department.id) \\ .groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"}) .. versionadded:: 1.3 """ def __init__(self, jdf, sql_ctx): self._jdf = jdf self.sql_ctx = sql_ctx self._sc = sql_ctx and sql_ctx._sc self.is_cached = False self._schema = None # initialized lazily self._lazy_rdd = None # Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice # by __repr__ and _repr_html_ while eager evaluation opened. self._support_repr_html = False @property @since(1.3) def rdd(self): """Returns the content as an :class:`pyspark.RDD` of :class:`Row`. """ if self._lazy_rdd is None: jrdd = self._jdf.javaToPython() self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer())) return self._lazy_rdd @property @since("1.3.1") def na(self): """Returns a :class:`DataFrameNaFunctions` for handling missing values. """ return DataFrameNaFunctions(self) @property @since(1.4) def stat(self): """Returns a :class:`DataFrameStatFunctions` for statistic functions. 
""" return DataFrameStatFunctions(self) @ignore_unicode_prefix @since(1.3) def toJSON(self, use_unicode=True): """Converts a :class:`DataFrame` into a :class:`RDD` of string. Each row is turned into a JSON document as one element in the returned RDD. >>> df.toJSON().first() u'{"age":2,"name":"Alice"}' """ rdd = self._jdf.toJSON() return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode)) @since(2.0) def createTempView(self, name): """Creates a local temporary view with this DataFrame. The lifetime of this temporary table is tied to the :class:`SparkSession` that was used to create this :class:`DataFrame`. throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the catalog. >>> df.createTempView("people") >>> df2 = spark.sql("select * from people") >>> sorted(df.collect()) == sorted(df2.collect()) True >>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... AnalysisException: u"Temporary table 'people' already exists;" >>> spark.catalog.dropTempView("people") """ self._jdf.createTempView(name) @since(2.0) def createOrReplaceTempView(self, name): """Creates or replaces a local temporary view with this DataFrame. The lifetime of this temporary table is tied to the :class:`SparkSession` that was used to create this :class:`DataFrame`. >>> df.createOrReplaceTempView("people") >>> df2 = df.filter(df.age > 3) >>> df2.createOrReplaceTempView("people") >>> df3 = spark.sql("select * from people") >>> sorted(df3.collect()) == sorted(df2.collect()) True >>> spark.catalog.dropTempView("people") """ self._jdf.createOrReplaceTempView(name) @since(2.1) def createGlobalTempView(self, name): """Creates a global temporary view with this DataFrame. The lifetime of this temporary view is tied to this Spark application. throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the catalog. >>> df.createGlobalTempView("people") >>> df2 = spark.sql("select * from global_temp.people") >>> sorted(df.collect()) == sorted(df2.collect()) True >>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... AnalysisException: u"Temporary table 'people' already exists;" >>> spark.catalog.dropGlobalTempView("people") """ self._jdf.createGlobalTempView(name) @since(2.2) def createOrReplaceGlobalTempView(self, name): """Creates or replaces a global temporary view using the given name. The lifetime of this temporary view is tied to this Spark application. >>> df.createOrReplaceGlobalTempView("people") >>> df2 = df.filter(df.age > 3) >>> df2.createOrReplaceGlobalTempView("people") >>> df3 = spark.sql("select * from global_temp.people") >>> sorted(df3.collect()) == sorted(df2.collect()) True >>> spark.catalog.dropGlobalTempView("people") """ self._jdf.createOrReplaceGlobalTempView(name) @property @since(1.4) def write(self): """ Interface for saving the content of the non-streaming :class:`DataFrame` out into external storage. :return: :class:`DataFrameWriter` """ return DataFrameWriter(self) @property @since(2.0) def writeStream(self): """ Interface for saving the content of the streaming :class:`DataFrame` out into external storage. .. note:: Evolving. :return: :class:`DataStreamWriter` """ return DataStreamWriter(self) @property @since(1.3) def schema(self): """Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`. 
>>> df.schema StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true))) """ if self._schema is None: try: self._schema = _parse_datatype_json_string(self._jdf.schema().json()) except AttributeError as e: raise Exception( "Unable to parse datatype from schema. %s" % e) return self._schema @since(1.3) def printSchema(self): """Prints out the schema in the tree format. >>> df.printSchema() root |-- age: integer (nullable = true) |-- name: string (nullable = true) <BLANKLINE> """ print(self._jdf.schema().treeString()) @since(1.3) def explain(self, extended=False): """Prints the (logical and physical) plans to the console for debugging purpose. :param extended: boolean, default ``False``. If ``False``, prints only the physical plan. >>> df.explain() == Physical Plan == *(1) Scan ExistingRDD[age#0,name#1] >>> df.explain(True) == Parsed Logical Plan == ... == Analyzed Logical Plan == ... == Optimized Logical Plan == ... == Physical Plan == ... """ if extended: print(self._jdf.queryExecution().toString()) else: print(self._jdf.queryExecution().simpleString()) @since(2.4) def exceptAll(self, other): """Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but not in another :class:`DataFrame` while preserving duplicates. This is equivalent to `EXCEPT ALL` in SQL. >>> df1 = spark.createDataFrame( ... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"]) >>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"]) >>> df1.exceptAll(df2).show() +---+---+ | C1| C2| +---+---+ | a| 1| | a| 1| | a| 2| | c| 4| +---+---+ Also as standard in SQL, this function resolves columns by position (not by name). """ return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx) @since(1.3) def isLocal(self): """Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally (without any Spark executors). """ return self._jdf.isLocal() @property @since(2.0) def isStreaming(self): """Returns true if this :class:`Dataset` contains one or more sources that continuously return data as it arrives. A :class:`Dataset` that reads data from a streaming source must be executed as a :class:`StreamingQuery` using the :func:`start` method in :class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or :func:`collect`) will throw an :class:`AnalysisException` when there is a streaming source present. .. note:: Evolving """ return self._jdf.isStreaming() @since(1.3) def show(self, n=20, truncate=True, vertical=False): """Prints the first ``n`` rows to the console. :param n: Number of rows to show. :param truncate: If set to True, truncate strings longer than 20 chars by default. If set to a number greater than one, truncates long strings to length ``truncate`` and align cells right. :param vertical: If set to True, print output rows vertically (one line per column value). 
>>> df DataFrame[age: int, name: string] >>> df.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ >>> df.show(truncate=3) +---+----+ |age|name| +---+----+ | 2| Ali| | 5| Bob| +---+----+ >>> df.show(vertical=True) -RECORD 0----- age | 2 name | Alice -RECORD 1----- age | 5 name | Bob """ if isinstance(truncate, bool) and truncate: print(self._jdf.showString(n, 20, vertical)) else: print(self._jdf.showString(n, int(truncate), vertical)) def __repr__(self): if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled(): vertical = False return self._jdf.showString( self.sql_ctx._conf.replEagerEvalMaxNumRows(), self.sql_ctx._conf.replEagerEvalTruncate(), vertical) else: return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes)) def _repr_html_(self): """Returns a dataframe with html code when you enabled eager evaluation by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are using support eager evaluation with HTML. """ if not self._support_repr_html: self._support_repr_html = True if self.sql_ctx._conf.isReplEagerEvalEnabled(): max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0) sock_info = self._jdf.getRowsToPython( max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate()) rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))) head = rows[0] row_data = rows[1:] has_more_data = len(row_data) > max_num_rows row_data = row_data[:max_num_rows] html = "<table border='1'>\n" # generate table head html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head)) # generate table rows for row in row_data: html += "<tr><td>%s</td></tr>\n" % "</td><td>".join( map(lambda x: html_escape(x), row)) html += "</table>\n" if has_more_data: html += "only showing top %d %s\n" % ( max_num_rows, "row" if max_num_rows == 1 else "rows") return html else: return None @since(2.1) def checkpoint(self, eager=True): """Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. It will be saved to files inside the checkpoint directory set with :meth:`SparkContext.setCheckpointDir`. :param eager: Whether to checkpoint this DataFrame immediately .. note:: Experimental """ jdf = self._jdf.checkpoint(eager) return DataFrame(jdf, self.sql_ctx) @since(2.3) def localCheckpoint(self, eager=True): """Returns a locally checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. Local checkpoints are stored in the executors using the caching subsystem and therefore they are not reliable. :param eager: Whether to checkpoint this DataFrame immediately .. note:: Experimental """ jdf = self._jdf.localCheckpoint(eager) return DataFrame(jdf, self.sql_ctx) @since(2.1) def withWatermark(self, eventTime, delayThreshold): """Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point in time before which we assume no more late data is going to arrive. Spark will use this watermark for several purposes: - To know when a given time window aggregation can be finalized and thus can be emitted when using output modes that do not allow updates. - To minimize the amount of state that we need to keep for on-going aggregations. 
The current watermark is computed by looking at the `MAX(eventTime)` seen across all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost of coordinating this value across partitions, the actual watermark used is only guaranteed to be at least `delayThreshold` behind the actual event time. In some cases we may still process records that arrive more than `delayThreshold` late. :param eventTime: the name of the column that contains the event time of the row. :param delayThreshold: the minimum delay to wait to data to arrive late, relative to the latest record that has been processed in the form of an interval (e.g. "1 minute" or "5 hours"). .. note:: Evolving >>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes') DataFrame[name: string, time: timestamp] """ if not eventTime or type(eventTime) is not str: raise TypeError("eventTime should be provided as a string") if not delayThreshold or type(delayThreshold) is not str: raise TypeError("delayThreshold should be provided as a string interval") jdf = self._jdf.withWatermark(eventTime, delayThreshold) return DataFrame(jdf, self.sql_ctx) @since(2.2) def hint(self, name, *parameters): """Specifies some hint on the current DataFrame. :param name: A name of the hint. :param parameters: Optional parameters. :return: :class:`DataFrame` >>> df.join(df2.hint("broadcast"), "name").show() +----+---+------+ |name|age|height| +----+---+------+ | Bob| 5| 85| +----+---+------+ """ if len(parameters) == 1 and isinstance(parameters[0], list): parameters = parameters[0] if not isinstance(name, str): raise TypeError("name should be provided as str, got {0}".format(type(name))) allowed_types = (basestring, list, float, int) for p in parameters: if not isinstance(p, allowed_types): raise TypeError( "all parameters should be in {0}, got {1} of type {2}".format( allowed_types, p, type(p))) jdf = self._jdf.hint(name, self._jseq(parameters)) return DataFrame(jdf, self.sql_ctx) @since(1.3) def count(self): """Returns the number of rows in this :class:`DataFrame`. >>> df.count() 2 """ return int(self._jdf.count()) @ignore_unicode_prefix @since(1.3) def collect(self): """Returns all the records as a list of :class:`Row`. >>> df.collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] """ with SCCallSiteSync(self._sc) as css: sock_info = self._jdf.collectToPython() return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))) @ignore_unicode_prefix @since(2.0) def toLocalIterator(self, prefetchPartitions=False): """ Returns an iterator that contains all of the rows in this :class:`DataFrame`. The iterator will consume as much memory as the largest partition in this DataFrame. With prefetch it may consume up to the memory of the 2 largest partitions. :param prefetchPartitions: If Spark should pre-fetch the next partition before it is needed. >>> list(df.toLocalIterator()) [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] """ with SCCallSiteSync(self._sc) as css: sock_info = self._jdf.toPythonIterator(prefetchPartitions) return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer())) @ignore_unicode_prefix @since(1.3) def limit(self, num): """Limits the result count to the number specified. 
>>> df.limit(1).collect() [Row(age=2, name=u'Alice')] >>> df.limit(0).collect() [] """ jdf = self._jdf.limit(num) return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix @since(1.3) def take(self, num): """Returns the first ``num`` rows as a :class:`list` of :class:`Row`. >>> df.take(2) [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] """ return self.limit(num).collect() @since(1.3) def foreach(self, f): """Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`. This is a shorthand for ``df.rdd.foreach()``. >>> def f(person): ... print(person.name) >>> df.foreach(f) """ self.rdd.foreach(f) @since(1.3) def foreachPartition(self, f): """Applies the ``f`` function to each partition of this :class:`DataFrame`. This a shorthand for ``df.rdd.foreachPartition()``. >>> def f(people): ... for person in people: ... print(person.name) >>> df.foreachPartition(f) """ self.rdd.foreachPartition(f) @since(1.3) def cache(self): """Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`). .. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0. """ self.is_cached = True self._jdf.cache() return self @since(1.3) def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK): """Sets the storage level to persist the contents of the :class:`DataFrame` across operations after the first time it is computed. This can only be used to assign a new storage level if the :class:`DataFrame` does not have a storage level set yet. If no storage level is specified defaults to (`MEMORY_AND_DISK`). .. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0. """ self.is_cached = True javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel) self._jdf.persist(javaStorageLevel) return self @property @since(2.1) def storageLevel(self): """Get the :class:`DataFrame`'s current storage level. >>> df.storageLevel StorageLevel(False, False, False, False, 1) >>> df.cache().storageLevel StorageLevel(True, True, False, True, 1) >>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel StorageLevel(True, False, False, False, 2) """ java_storage_level = self._jdf.storageLevel() storage_level = StorageLevel(java_storage_level.useDisk(), java_storage_level.useMemory(), java_storage_level.useOffHeap(), java_storage_level.deserialized(), java_storage_level.replication()) return storage_level @since(1.3) def unpersist(self, blocking=False): """Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from memory and disk. .. note:: `blocking` default has changed to False to match Scala in 2.0. """ self.is_cached = False self._jdf.unpersist(blocking) return self @since(1.4) def coalesce(self, numPartitions): """ Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions. :param numPartitions: int, to specify the target number of partitions Similar to coalesce defined on an :class:`RDD`, this operation results in a narrow dependency, e.g. if you go from 1000 partitions to 100 partitions, there will not be a shuffle, instead each of the 100 new partitions will claim 10 of the current partitions. If a larger number of partitions is requested, it will stay at the current number of partitions. However, if you're doing a drastic coalesce, e.g. to numPartitions = 1, this may result in your computation taking place on fewer nodes than you like (e.g. one node in the case of numPartitions = 1). To avoid this, you can call repartition(). 
This will add a shuffle step, but means the current upstream partitions will be executed in parallel (per whatever the current partitioning is). >>> df.coalesce(1).rdd.getNumPartitions() 1 """ return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx) @since(1.3) def repartition(self, numPartitions, *cols): """ Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The resulting DataFrame is hash partitioned. :param numPartitions: can be an int to specify the target number of partitions or a Column. If it is a Column, it will be used as the first partitioning column. If not specified, the default number of partitions is used. .. versionchanged:: 1.6 Added optional arguments to specify the partitioning columns. Also made numPartitions optional if partitioning columns are specified. >>> df.repartition(10).rdd.getNumPartitions() 10 >>> data = df.union(df).repartition("age") >>> data.show() +---+-----+ |age| name| +---+-----+ | 5| Bob| | 5| Bob| | 2|Alice| | 2|Alice| +---+-----+ >>> data = data.repartition(7, "age") >>> data.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| | 2|Alice| | 5| Bob| +---+-----+ >>> data.rdd.getNumPartitions() 7 >>> data = data.repartition("name", "age") >>> data.show() +---+-----+ |age| name| +---+-----+ | 5| Bob| | 5| Bob| | 2|Alice| | 2|Alice| +---+-----+ """ if isinstance(numPartitions, int): if len(cols) == 0: return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx) else: return DataFrame( self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx) elif isinstance(numPartitions, (basestring, Column)): cols = (numPartitions, ) + cols return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx) else: raise TypeError("numPartitions should be an int or Column") @since("2.4.0") def repartitionByRange(self, numPartitions, *cols): """ Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The resulting DataFrame is range partitioned. :param numPartitions: can be an int to specify the target number of partitions or a Column. If it is a Column, it will be used as the first partitioning column. If not specified, the default number of partitions is used. At least one partition-by expression must be specified. When no explicit sort order is specified, "ascending nulls first" is assumed. Note that due to performance reasons this method uses sampling to estimate the ranges. Hence, the output may not be consistent, since sampling can return different values. The sample size can be controlled by the config `spark.sql.execution.rangeExchange.sampleSizePerPartition`. 
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions() 2 >>> df.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ >>> df.repartitionByRange(1, "age").rdd.getNumPartitions() 1 >>> data = df.repartitionByRange("age") >>> df.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ """ if isinstance(numPartitions, int): if len(cols) == 0: raise ValueError("At least one partition-by expression must be specified.") else: return DataFrame( self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx) elif isinstance(numPartitions, (basestring, Column)): cols = (numPartitions,) + cols return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx) else: raise TypeError("numPartitions should be an int, string or Column") @since(1.3) def distinct(self): """Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`. >>> df.distinct().count() 2 """ return DataFrame(self._jdf.distinct(), self.sql_ctx) @since(1.3) def sample(self, withReplacement=None, fraction=None, seed=None): """Returns a sampled subset of this :class:`DataFrame`. :param withReplacement: Sample with replacement or not (default False). :param fraction: Fraction of rows to generate, range [0.0, 1.0]. :param seed: Seed for sampling (default a random seed). .. note:: This is not guaranteed to provide exactly the fraction specified of the total count of the given :class:`DataFrame`. .. note:: `fraction` is required, and `withReplacement` and `seed` are optional. >>> df = spark.range(10) >>> df.sample(0.5, 3).count() 7 >>> df.sample(fraction=0.5, seed=3).count() 7 >>> df.sample(withReplacement=True, fraction=0.5, seed=3).count() 1 >>> df.sample(1.0).count() 10 >>> df.sample(fraction=1.0).count() 10 >>> df.sample(False, fraction=1.0).count() 10 """ # For the cases below: # sample(True, 0.5 [, seed]) # sample(True, fraction=0.5 [, seed]) # sample(withReplacement=False, fraction=0.5 [, seed]) is_withReplacement_set = \ type(withReplacement) == bool and isinstance(fraction, float) # For the case below: # sample(fraction=0.5 [, seed]) is_withReplacement_omitted_kwargs = \ withReplacement is None and isinstance(fraction, float) # For the case below: # sample(0.5 [, seed]) is_withReplacement_omitted_args = isinstance(withReplacement, float) if not (is_withReplacement_set or is_withReplacement_omitted_kwargs or is_withReplacement_omitted_args): argtypes = [ str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None] raise TypeError( "withReplacement (optional), fraction (required) and seed (optional)" " should be a bool, float and number; however, " "got [%s]." % ", ".join(argtypes)) if is_withReplacement_omitted_args: if fraction is not None: seed = fraction fraction = withReplacement withReplacement = None seed = long(seed) if seed is not None else None args = [arg for arg in [withReplacement, fraction, seed] if arg is not None] jdf = self._jdf.sample(*args) return DataFrame(jdf, self.sql_ctx) @since(1.5) def sampleBy(self, col, fractions, seed=None): """ Returns a stratified sample without replacement based on the fraction given on each stratum. :param col: column that defines strata :param fractions: sampling fraction for each stratum. If a stratum is not specified, we treat its fraction as zero.
:param seed: random seed :return: a new DataFrame that represents the stratified sample >>> from pyspark.sql.functions import col >>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key")) >>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0) >>> sampled.groupBy("key").count().orderBy("key").show() +---+-----+ |key|count| +---+-----+ | 0| 3| | 1| 6| +---+-----+ >>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count() 33 .. versionchanged:: 3.0 Added sampling by a column of :class:`Column` """ if isinstance(col, basestring): col = Column(col) elif not isinstance(col, Column): raise ValueError("col must be a string or a column, but got %r" % type(col)) if not isinstance(fractions, dict): raise ValueError("fractions must be a dict but got %r" % type(fractions)) for k, v in fractions.items(): if not isinstance(k, (float, int, long, basestring)): raise ValueError("key must be float, int, long, or string, but got %r" % type(k)) fractions[k] = float(v) col = col._jc seed = seed if seed is not None else random.randint(0, sys.maxsize) return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx) @since(1.4) def randomSplit(self, weights, seed=None): """Randomly splits this :class:`DataFrame` with the provided weights. :param weights: list of doubles as weights with which to split the DataFrame. Weights will be normalized if they don't sum up to 1.0. :param seed: The seed for sampling. >>> splits = df4.randomSplit([1.0, 2.0], 24) >>> splits[0].count() 2 >>> splits[1].count() 2 """ for w in weights: if w < 0.0: raise ValueError("Weights must be positive. Found weight value: %s" % w) seed = seed if seed is not None else random.randint(0, sys.maxsize) rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed)) return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array] @property @since(1.3) def dtypes(self): """Returns all column names and their data types as a list. >>> df.dtypes [('age', 'int'), ('name', 'string')] """ return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields] @property @since(1.3) def columns(self): """Returns all column names as a list. >>> df.columns ['age', 'name'] """ return [f.name for f in self.schema.fields] @since(2.3) def colRegex(self, colName): """ Selects column based on the column name specified as a regex and returns it as :class:`Column`. :param colName: string, column name specified as a regex. >>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"]) >>> df.select(df.colRegex("`(Col1)?+.+`")).show() +----+ |Col2| +----+ | 1| | 2| | 3| +----+ """ if not isinstance(colName, basestring): raise ValueError("colName should be provided as string") jc = self._jdf.colRegex(colName) return Column(jc) @ignore_unicode_prefix @since(1.3) def alias(self, alias): """Returns a new :class:`DataFrame` with an alias set. :param alias: string, an alias name to be set for the DataFrame. 
>>> from pyspark.sql.functions import * >>> df_as1 = df.alias("df_as1") >>> df_as2 = df.alias("df_as2") >>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner') >>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect() [Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)] """ assert isinstance(alias, basestring), "alias should be a string" return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx) @ignore_unicode_prefix @since(2.1) def crossJoin(self, other): """Returns the cartesian product with another :class:`DataFrame`. :param other: Right side of the cartesian product. >>> df.select("age", "name").collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] >>> df2.select("name", "height").collect() [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)] >>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect() [Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85), Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)] """ jdf = self._jdf.crossJoin(other._jdf) return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix @since(1.3) def join(self, other, on=None, how=None): """Joins with another :class:`DataFrame`, using the given join expression. :param other: Right side of the join :param on: a string for the join column name, a list of column names, a join expression (Column), or a list of Columns. If `on` is a string or a list of strings indicating the name of the join column(s), the column(s) must exist on both sides, and this performs an equi-join. :param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``, ``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``, ``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``, ``anti``, ``leftanti`` and ``left_anti``. The following performs a full outer join between ``df1`` and ``df2``. >>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect() [Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)] >>> df.join(df2, 'name', 'outer').select('name', 'height').collect() [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)] >>> cond = [df.name == df3.name, df.age == df3.age] >>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect() [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)] >>> df.join(df2, 'name').select(df.name, df2.height).collect() [Row(name=u'Bob', height=85)] >>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect() [Row(name=u'Bob', age=5)] """ if on is not None and not isinstance(on, list): on = [on] if on is not None: if isinstance(on[0], basestring): on = self._jseq(on) else: assert isinstance(on[0], Column), "on should be Column or list of Column" on = reduce(lambda x, y: x.__and__(y), on) on = on._jc if on is None and how is None: jdf = self._jdf.join(other._jdf) else: if how is None: how = "inner" if on is None: on = self._jseq([]) assert isinstance(how, basestring), "how should be basestring" jdf = self._jdf.join(other._jdf, on, how) return DataFrame(jdf, self.sql_ctx) @since(1.6) def sortWithinPartitions(self, *cols, **kwargs): """Returns a new :class:`DataFrame` with each partition sorted by the specified column(s). :param cols: list of :class:`Column` or column names to sort by. :param ascending: boolean or list of boolean (default True). Sort ascending vs. descending. 
Specify list for multiple sort orders. If a list is specified, length of the list must equal length of the `cols`. >>> df.sortWithinPartitions("age", ascending=False).show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ """ jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs)) return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix @since(1.3) def sort(self, *cols, **kwargs): """Returns a new :class:`DataFrame` sorted by the specified column(s). :param cols: list of :class:`Column` or column names to sort by. :param ascending: boolean or list of boolean (default True). Sort ascending vs. descending. Specify list for multiple sort orders. If a list is specified, length of the list must equal length of the `cols`. >>> df.sort(df.age.desc()).collect() [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')] >>> df.sort("age", ascending=False).collect() [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')] >>> df.orderBy(df.age.desc()).collect() [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')] >>> from pyspark.sql.functions import * >>> df.sort(asc("age")).collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] >>> df.orderBy(desc("age"), "name").collect() [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')] >>> df.orderBy(["age", "name"], ascending=[0, 1]).collect() [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')] """ jdf = self._jdf.sort(self._sort_cols(cols, kwargs)) return DataFrame(jdf, self.sql_ctx) orderBy = sort def _jseq(self, cols, converter=None): """Return a JVM Seq of Columns from a list of Column or names""" return _to_seq(self.sql_ctx._sc, cols, converter) def _jmap(self, jm): """Return a JVM Scala Map from a dict""" return _to_scala_map(self.sql_ctx._sc, jm) def _jcols(self, *cols): """Return a JVM Seq of Columns from a list of Column or column names If `cols` has only one list in it, cols[0] will be used as the list. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] return self._jseq(cols, _to_java_column) def _sort_cols(self, cols, kwargs): """ Return a JVM Seq of Columns that describes the sort order """ if not cols: raise ValueError("should sort by at least one column") if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jcols = [_to_java_column(c) for c in cols] ascending = kwargs.get('ascending', True) if isinstance(ascending, (bool, int)): if not ascending: jcols = [jc.desc() for jc in jcols] elif isinstance(ascending, list): jcols = [jc if asc else jc.desc() for asc, jc in zip(ascending, jcols)] else: raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending)) return self._jseq(jcols) @since("1.3.1") def describe(self, *cols): """Computes basic statistics for numeric and string columns. This include count, mean, stddev, min, and max. If no columns are given, this function computes statistics for all numerical or string columns. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. 
>>> df.describe(['age']).show() +-------+------------------+ |summary| age| +-------+------------------+ | count| 2| | mean| 3.5| | stddev|2.1213203435596424| | min| 2| | max| 5| +-------+------------------+ >>> df.describe().show() +-------+------------------+-----+ |summary| age| name| +-------+------------------+-----+ | count| 2| 2| | mean| 3.5| null| | stddev|2.1213203435596424| null| | min| 2|Alice| | max| 5| Bob| +-------+------------------+-----+ Use summary for expanded statistics and control over which statistics to compute. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jdf = self._jdf.describe(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx) @since("2.3.0") def summary(self, *statistics): """Computes specified statistics for numeric and string columns. Available statistics are: - count - mean - stddev - min - max - arbitrary approximate percentiles specified as a percentage (eg, 75%) If no statistics are given, this function computes count, mean, stddev, min, approximate quartiles (percentiles at 25%, 50%, and 75%), and max. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. >>> df.summary().show() +-------+------------------+-----+ |summary| age| name| +-------+------------------+-----+ | count| 2| 2| | mean| 3.5| null| | stddev|2.1213203435596424| null| | min| 2|Alice| | 25%| 2| null| | 50%| 2| null| | 75%| 5| null| | max| 5| Bob| +-------+------------------+-----+ >>> df.summary("count", "min", "25%", "75%", "max").show() +-------+---+-----+ |summary|age| name| +-------+---+-----+ | count| 2| 2| | min| 2|Alice| | 25%| 2| null| | 75%| 5| null| | max| 5| Bob| +-------+---+-----+ To do a summary for specific columns first select them: >>> df.select("age", "name").summary("count").show() +-------+---+----+ |summary|age|name| +-------+---+----+ | count| 2| 2| +-------+---+----+ See also describe for basic statistics. """ if len(statistics) == 1 and isinstance(statistics[0], list): statistics = statistics[0] jdf = self._jdf.summary(self._jseq(statistics)) return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix @since(1.3) def head(self, n=None): """Returns the first ``n`` rows. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. :param n: int, default 1. Number of rows to return. :return: If n is greater than 1, return a list of :class:`Row`. If n is 1, return a single Row. >>> df.head() Row(age=2, name=u'Alice') >>> df.head(1) [Row(age=2, name=u'Alice')] """ if n is None: rs = self.head(1) return rs[0] if rs else None return self.take(n) @ignore_unicode_prefix @since(1.3) def first(self): """Returns the first row as a :class:`Row`. >>> df.first() Row(age=2, name=u'Alice') """ return self.head() @ignore_unicode_prefix @since(1.3) def __getitem__(self, item): """Returns the column as a :class:`Column`. 
>>> df.select(df['age']).collect() [Row(age=2), Row(age=5)] >>> df[ ["name", "age"]].collect() [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)] >>> df[ df.age > 3 ].collect() [Row(age=5, name=u'Bob')] >>> df[df[0] > 3].collect() [Row(age=5, name=u'Bob')] """ if isinstance(item, basestring): jc = self._jdf.apply(item) return Column(jc) elif isinstance(item, Column): return self.filter(item) elif isinstance(item, (list, tuple)): return self.select(*item) elif isinstance(item, int): jc = self._jdf.apply(self.columns[item]) return Column(jc) else: raise TypeError("unexpected item type: %s" % type(item)) @since(1.3) def __getattr__(self, name): """Returns the :class:`Column` denoted by ``name``. >>> df.select(df.age).collect() [Row(age=2), Row(age=5)] """ if name not in self.columns: raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) jc = self._jdf.apply(name) return Column(jc) @ignore_unicode_prefix @since(1.3) def select(self, *cols): """Projects a set of expressions and returns a new :class:`DataFrame`. :param cols: list of column names (string) or expressions (:class:`Column`). If one of the column names is '*', that column is expanded to include all columns in the current DataFrame. >>> df.select('*').collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] >>> df.select('name', 'age').collect() [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)] >>> df.select(df.name, (df.age + 10).alias('age')).collect() [Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)] """ jdf = self._jdf.select(self._jcols(*cols)) return DataFrame(jdf, self.sql_ctx) @since(1.3) def selectExpr(self, *expr): """Projects a set of SQL expressions and returns a new :class:`DataFrame`. This is a variant of :func:`select` that accepts SQL expressions. >>> df.selectExpr("age * 2", "abs(age)").collect() [Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)] """ if len(expr) == 1 and isinstance(expr[0], list): expr = expr[0] jdf = self._jdf.selectExpr(self._jseq(expr)) return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix @since(1.3) def filter(self, condition): """Filters rows using the given condition. :func:`where` is an alias for :func:`filter`. :param condition: a :class:`Column` of :class:`types.BooleanType` or a string of SQL expression. >>> df.filter(df.age > 3).collect() [Row(age=5, name=u'Bob')] >>> df.where(df.age == 2).collect() [Row(age=2, name=u'Alice')] >>> df.filter("age > 3").collect() [Row(age=5, name=u'Bob')] >>> df.where("age = 2").collect() [Row(age=2, name=u'Alice')] """ if isinstance(condition, basestring): jdf = self._jdf.filter(condition) elif isinstance(condition, Column): jdf = self._jdf.filter(condition._jc) else: raise TypeError("condition should be string or Column") return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix @since(1.3) def groupBy(self, *cols): """Groups the :class:`DataFrame` using the specified columns, so we can run aggregation on them. See :class:`GroupedData` for all the available aggregate functions. :func:`groupby` is an alias for :func:`groupBy`. :param cols: list of columns to group by. Each element should be a column name (string) or an expression (:class:`Column`). 
>>> df.groupBy().avg().collect() [Row(avg(age)=3.5)] >>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect()) [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)] >>> sorted(df.groupBy(df.name).avg().collect()) [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)] >>> sorted(df.groupBy(['name', df.age]).count().collect()) [Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)] """ jgd = self._jdf.groupBy(self._jcols(*cols)) from pyspark.sql.group import GroupedData return GroupedData(jgd, self) @since(1.4) def rollup(self, *cols): """ Create a multi-dimensional rollup for the current :class:`DataFrame` using the specified columns, so we can run aggregation on them. >>> df.rollup("name", df.age).count().orderBy("name", "age").show() +-----+----+-----+ | name| age|count| +-----+----+-----+ | null|null| 2| |Alice|null| 1| |Alice| 2| 1| | Bob|null| 1| | Bob| 5| 1| +-----+----+-----+ """ jgd = self._jdf.rollup(self._jcols(*cols)) from pyspark.sql.group import GroupedData return GroupedData(jgd, self) @since(1.4) def cube(self, *cols): """ Create a multi-dimensional cube for the current :class:`DataFrame` using the specified columns, so we can run aggregation on them. >>> df.cube("name", df.age).count().orderBy("name", "age").show() +-----+----+-----+ | name| age|count| +-----+----+-----+ | null|null| 2| | null| 2| 1| | null| 5| 1| |Alice|null| 1| |Alice| 2| 1| | Bob|null| 1| | Bob| 5| 1| +-----+----+-----+ """ jgd = self._jdf.cube(self._jcols(*cols)) from pyspark.sql.group import GroupedData return GroupedData(jgd, self) @since(1.3) def agg(self, *exprs): """ Aggregate on the entire :class:`DataFrame` without groups (shorthand for ``df.groupBy.agg()``). >>> df.agg({"age": "max"}).collect() [Row(max(age)=5)] >>> from pyspark.sql import functions as F >>> df.agg(F.min(df.age)).collect() [Row(min(age)=2)] """ return self.groupBy().agg(*exprs) @since(2.0) def union(self, other): """ Return a new :class:`DataFrame` containing union of rows in this and another frame. This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. Also as standard in SQL, this function resolves columns by position (not by name). """ return DataFrame(self._jdf.union(other._jdf), self.sql_ctx) @since(1.3) def unionAll(self, other): """ Return a new :class:`DataFrame` containing union of rows in this and another frame. This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. Also as standard in SQL, this function resolves columns by position (not by name). """ return self.union(other) @since(2.3) def unionByName(self, other): """ Returns a new :class:`DataFrame` containing union of rows in this and another frame. This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. 
The difference between this function and :func:`union` is that this function resolves columns by name (not by position): >>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"]) >>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"]) >>> df1.unionByName(df2).show() +----+----+----+ |col0|col1|col2| +----+----+----+ | 1| 2| 3| | 6| 4| 5| +----+----+----+ """ return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx) @since(1.3) def intersect(self, other): """ Return a new :class:`DataFrame` containing rows only in both this frame and another frame. This is equivalent to `INTERSECT` in SQL. """ return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx) @since(2.4) def intersectAll(self, other): """ Return a new :class:`DataFrame` containing rows in both this dataframe and other dataframe while preserving duplicates. This is equivalent to `INTERSECT ALL` in SQL. >>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"]) >>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"]) >>> df1.intersectAll(df2).sort("C1", "C2").show() +---+---+ | C1| C2| +---+---+ | a| 1| | a| 1| | b| 3| +---+---+ Also as standard in SQL, this function resolves columns by position (not by name). """ return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx) @since(1.3) def subtract(self, other): """ Return a new :class:`DataFrame` containing rows in this frame but not in another frame. This is equivalent to `EXCEPT DISTINCT` in SQL. """ return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx) @since(1.4) def dropDuplicates(self, subset=None): """Return a new :class:`DataFrame` with duplicate rows removed, optionally only considering certain columns. For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming :class:`DataFrame`, it will keep all data across triggers as intermediate state to drop duplicates rows. You can use :func:`withWatermark` to limit how late the duplicate data can be and system will accordingly limit the state. In addition, too late data older than watermark will be dropped to avoid any possibility of duplicates. :func:`drop_duplicates` is an alias for :func:`dropDuplicates`. >>> from pyspark.sql import Row >>> df = sc.parallelize([ \\ ... Row(name='Alice', age=5, height=80), \\ ... Row(name='Alice', age=5, height=80), \\ ... Row(name='Alice', age=10, height=80)]).toDF() >>> df.dropDuplicates().show() +---+------+-----+ |age|height| name| +---+------+-----+ | 5| 80|Alice| | 10| 80|Alice| +---+------+-----+ >>> df.dropDuplicates(['name', 'height']).show() +---+------+-----+ |age|height| name| +---+------+-----+ | 5| 80|Alice| +---+------+-----+ """ if subset is None: jdf = self._jdf.dropDuplicates() else: jdf = self._jdf.dropDuplicates(self._jseq(subset)) return DataFrame(jdf, self.sql_ctx) @since("1.3.1") def dropna(self, how='any', thresh=None, subset=None): """Returns a new :class:`DataFrame` omitting rows with null values. :func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other. :param how: 'any' or 'all'. If 'any', drop a row if it contains any nulls. If 'all', drop a row only if all its values are null. :param thresh: int, default None If specified, drop rows that have less than `thresh` non-null values. This overwrites the `how` parameter. :param subset: optional list of column names to consider. 
>>> df4.na.drop().show() +---+------+-----+ |age|height| name| +---+------+-----+ | 10| 80|Alice| +---+------+-----+ """ if how is not None and how not in ['any', 'all']: raise ValueError("how ('" + how + "') should be 'any' or 'all'") if subset is None: subset = self.columns elif isinstance(subset, basestring): subset = [subset] elif not isinstance(subset, (list, tuple)): raise ValueError("subset should be a list or tuple of column names") if thresh is None: thresh = len(subset) if how == 'any' else 1 return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx) @since("1.3.1") def fillna(self, value, subset=None): """Replace null values, alias for ``na.fill()``. :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other. :param value: int, long, float, string, bool or dict. Value to replace null values with. If the value is a dict, then `subset` is ignored and `value` must be a mapping from column name (string) to replacement value. The replacement value must be an int, long, float, boolean, or string. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.fill(50).show() +---+------+-----+ |age|height| name| +---+------+-----+ | 10| 80|Alice| | 5| 50| Bob| | 50| 50| Tom| | 50| 50| null| +---+------+-----+ >>> df5.na.fill(False).show() +----+-------+-----+ | age| name| spy| +----+-------+-----+ | 10| Alice|false| | 5| Bob|false| |null|Mallory| true| +----+-------+-----+ >>> df4.na.fill({'age': 50, 'name': 'unknown'}).show() +---+------+-------+ |age|height| name| +---+------+-------+ | 10| 80| Alice| | 5| null| Bob| | 50| null| Tom| | 50| null|unknown| +---+------+-------+ """ if not isinstance(value, (float, int, long, basestring, bool, dict)): raise ValueError("value should be a float, int, long, string, bool or dict") # Note that bool validates isinstance(int), but we don't want to # convert bools to floats if not isinstance(value, bool) and isinstance(value, (int, long)): value = float(value) if isinstance(value, dict): return DataFrame(self._jdf.na().fill(value), self.sql_ctx) elif subset is None: return DataFrame(self._jdf.na().fill(value), self.sql_ctx) else: if isinstance(subset, basestring): subset = [subset] elif not isinstance(subset, (list, tuple)): raise ValueError("subset should be a list or tuple of column names") return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx) @since(1.4) def replace(self, to_replace, value=_NoValue, subset=None): """Returns a new :class:`DataFrame` replacing a value with another value. :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are aliases of each other. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. Value can have None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`) and arbitrary replacement will be used. :param to_replace: bool, int, long, float, string, list or dict. Value to be replaced. If the value is a dict, then `value` is ignored or can be omitted, and `to_replace` must be a mapping between a value and a replacement. :param value: bool, int, long, float, string, list or None. 
The replacement value must be a bool, int, long, float, string or None. If `value` is a list, `value` should be of the same length and type as `to_replace`. If `value` is a scalar and `to_replace` is a sequence, then `value` is used as a replacement for each item in `to_replace`. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.replace(10, 20).show() +----+------+-----+ | age|height| name| +----+------+-----+ | 20| 80|Alice| | 5| null| Bob| |null| null| Tom| |null| null| null| +----+------+-----+ >>> df4.na.replace('Alice', None).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace({'Alice': None}).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80| A| | 5| null| B| |null| null| Tom| |null| null|null| +----+------+----+ """ if value is _NoValue: if isinstance(to_replace, dict): value = None else: raise TypeError("value argument is required when to_replace is not a dictionary.") # Helper functions def all_of(types): """Given a type or tuple of types and a sequence of xs check if each x is instance of type(s) >>> all_of(bool)([True, False]) True >>> all_of(basestring)(["a", 1]) False """ def all_of_(xs): return all(isinstance(x, types) for x in xs) return all_of_ all_of_bool = all_of(bool) all_of_str = all_of(basestring) all_of_numeric = all_of((float, int, long)) # Validate input types valid_types = (bool, float, int, long, basestring, list, tuple) if not isinstance(to_replace, valid_types + (dict, )): raise ValueError( "to_replace should be a bool, float, int, long, string, list, tuple, or dict. " "Got {0}".format(type(to_replace))) if not isinstance(value, valid_types) and value is not None \ and not isinstance(to_replace, dict): raise ValueError("If to_replace is not a dict, value should be " "a bool, float, int, long, string, list, tuple or None. " "Got {0}".format(type(value))) if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)): if len(to_replace) != len(value): raise ValueError("to_replace and value lists should be of the same length. " "Got {0} and {1}".format(len(to_replace), len(value))) if not (subset is None or isinstance(subset, (list, tuple, basestring))): raise ValueError("subset should be a list or tuple of column names, " "column name or None. Got {0}".format(type(subset))) # Reshape input arguments if necessary if isinstance(to_replace, (float, int, long, basestring)): to_replace = [to_replace] if isinstance(to_replace, dict): rep_dict = to_replace if value is not None: warnings.warn("to_replace is a dict and value is not None. value will be ignored.") else: if isinstance(value, (float, int, long, basestring)) or value is None: value = [value for _ in range(len(to_replace))] rep_dict = dict(zip(to_replace, value)) if isinstance(subset, basestring): subset = [subset] # Verify we were not passed in mixed type generics. 
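# (Editor-added note, not in the upstream pyspark source.) The check below
# accepts the replacement mapping only when all keys, and all non-None values,
# belong to a single type family -- all booleans, all strings, or all numerics;
# any mixed combination raises ValueError("Mixed type replacements are not supported").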
if not any(all_of_type(rep_dict.keys()) and all_of_type(x for x in rep_dict.values() if x is not None) for all_of_type in [all_of_bool, all_of_str, all_of_numeric]): raise ValueError("Mixed type replacements are not supported") if subset is None: return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx) else: return DataFrame( self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx) @since(2.0) def approxQuantile(self, col, probabilities, relativeError): """ Calculates the approximate quantiles of numerical columns of a DataFrame. The result of this algorithm has the following deterministic bound: If the DataFrame has N elements and if we request the quantile at probability `p` up to error `err`, then the algorithm will return a sample `x` from the DataFrame so that the *exact* rank of `x` is close to (p * N). More precisely, floor((p - err) * N) <= rank(x) <= ceil((p + err) * N). This method implements a variation of the Greenwald-Khanna algorithm (with some speed optimizations). The algorithm was first present in [[https://doi.org/10.1145/375663.375670 Space-efficient Online Computation of Quantile Summaries]] by Greenwald and Khanna. Note that null values will be ignored in numerical columns before calculation. For columns only containing null values, an empty list is returned. :param col: str, list. Can be a single column name, or a list of names for multiple columns. :param probabilities: a list of quantile probabilities Each number must belong to [0, 1]. For example 0 is the minimum, 0.5 is the median, 1 is the maximum. :param relativeError: The relative target precision to achieve (>= 0). If set to zero, the exact quantiles are computed, which could be very expensive. Note that values greater than 1 are accepted but give the same result as 1. :return: the approximate quantiles at the given probabilities. If the input `col` is a string, the output is a list of floats. If the input `col` is a list or tuple of strings, the output is also a list, but each element in it is a list of floats, i.e., the output is a list of list of floats. .. versionchanged:: 2.2 Added support for multiple columns. """ if not isinstance(col, (basestring, list, tuple)): raise ValueError("col should be a string, list or tuple, but got %r" % type(col)) isStr = isinstance(col, basestring) if isinstance(col, tuple): col = list(col) elif isStr: col = [col] for c in col: if not isinstance(c, basestring): raise ValueError("columns should be strings, but got %r" % type(c)) col = _to_list(self._sc, col) if not isinstance(probabilities, (list, tuple)): raise ValueError("probabilities should be a list or tuple") if isinstance(probabilities, tuple): probabilities = list(probabilities) for p in probabilities: if not isinstance(p, (float, int, long)) or p < 0 or p > 1: raise ValueError("probabilities should be numerical (float, int, long) in [0,1].") probabilities = _to_list(self._sc, probabilities) if not isinstance(relativeError, (float, int, long)) or relativeError < 0: raise ValueError("relativeError should be numerical (float, int, long) >= 0.") relativeError = float(relativeError) jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError) jaq_list = [list(j) for j in jaq] return jaq_list[0] if isStr else jaq_list @since(1.4) def corr(self, col1, col2, method=None): """ Calculates the correlation of two columns of a DataFrame as a double value. Currently only supports the Pearson Correlation Coefficient. 
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other. :param col1: The name of the first column :param col2: The name of the second column :param method: The correlation method. Currently only supports "pearson" """ if not isinstance(col1, basestring): raise ValueError("col1 should be a string.") if not isinstance(col2, basestring): raise ValueError("col2 should be a string.") if not method: method = "pearson" if not method == "pearson": raise ValueError("Currently only the calculation of the Pearson Correlation " + "coefficient is supported.") return self._jdf.stat().corr(col1, col2, method) @since(1.4) def cov(self, col1, col2): """ Calculate the sample covariance for the given columns, specified by their names, as a double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases. :param col1: The name of the first column :param col2: The name of the second column """ if not isinstance(col1, basestring): raise ValueError("col1 should be a string.") if not isinstance(col2, basestring): raise ValueError("col2 should be a string.") return self._jdf.stat().cov(col1, col2) @since(1.4) def crosstab(self, col1, col2): """ Computes a pair-wise frequency table of the given columns. Also known as a contingency table. The number of distinct values for each column should be less than 1e4. At most 1e6 non-zero pair frequencies will be returned. The first column of each row will be the distinct values of `col1` and the column names will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`. Pairs that have no occurrences will have zero as their counts. :func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases. :param col1: The name of the first column. Distinct items will make the first item of each row. :param col2: The name of the second column. Distinct items will make the column names of the DataFrame. """ if not isinstance(col1, basestring): raise ValueError("col1 should be a string.") if not isinstance(col2, basestring): raise ValueError("col2 should be a string.") return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx) @since(1.4) def freqItems(self, cols, support=None): """ Finding frequent items for columns, possibly with false positives. Using the frequent element count algorithm described in "https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou". :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. :param cols: Names of the columns to calculate frequent items for as a list or tuple of strings. :param support: The frequency with which to consider an item 'frequent'. Default is 1%. The support must be greater than 1e-4. """ if isinstance(cols, tuple): cols = list(cols) if not isinstance(cols, list): raise ValueError("cols must be a list or tuple of column names as strings.") if not support: support = 0.01 return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx) @ignore_unicode_prefix @since(1.3) def withColumn(self, colName, col): """ Returns a new :class:`DataFrame` by adding a column or replacing the existing column that has the same name. The column expression must be an expression over this DataFrame; attempting to add a column from some other dataframe will raise an error. 
:param colName: string, name of the new column. :param col: a :class:`Column` expression for the new column. .. note:: This method introduces a projection internally. Therefore, calling it multiple times, for instance, via loops in order to add multiple columns can generate big plans which can cause performance issues and even `StackOverflowException`. To avoid this, use :func:`select` with the multiple columns at once. >>> df.withColumn('age2', df.age + 2).collect() [Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)] """ assert isinstance(col, Column), "col should be Column" return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx) @ignore_unicode_prefix @since(1.3) def withColumnRenamed(self, existing, new): """Returns a new :class:`DataFrame` by renaming an existing column. This is a no-op if schema doesn't contain the given column name. :param existing: string, name of the existing column to rename. :param new: string, new name of the column. >>> df.withColumnRenamed('age', 'age2').collect() [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')] """ return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx) @since(1.4) @ignore_unicode_prefix def drop(self, *cols): """Returns a new :class:`DataFrame` that drops the specified column. This is a no-op if schema doesn't contain the given column name(s). :param cols: a string name of the column to drop, or a :class:`Column` to drop, or a list of string name of the columns to drop. >>> df.drop('age').collect() [Row(name=u'Alice'), Row(name=u'Bob')] >>> df.drop(df.age).collect() [Row(name=u'Alice'), Row(name=u'Bob')] >>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect() [Row(age=5, height=85, name=u'Bob')] >>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect() [Row(age=5, name=u'Bob', height=85)] >>> df.join(df2, 'name', 'inner').drop('age', 'height').collect() [Row(name=u'Bob')] """ if len(cols) == 1: col = cols[0] if isinstance(col, basestring): jdf = self._jdf.drop(col) elif isinstance(col, Column): jdf = self._jdf.drop(col._jc) else: raise TypeError("col should be a string or a Column") else: for col in cols: if not isinstance(col, basestring): raise TypeError("each col in the param list should be a string") jdf = self._jdf.drop(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix def toDF(self, *cols): """Returns a new class:`DataFrame` that with new specified column names :param cols: list of new column names (string) >>> df.toDF('f1', 'f2').collect() [Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')] """ jdf = self._jdf.toDF(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx) @since(3.0) def transform(self, func): """Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations. :param func: a function that takes and returns a class:`DataFrame`. >>> from pyspark.sql.functions import col >>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"]) >>> def cast_all_to_int(input_df): ... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns]) >>> def sort_columns_asc(input_df): ... return input_df.select(*sorted(input_df.columns)) >>> df.transform(cast_all_to_int).transform(sort_columns_asc).show() +-----+---+ |float|int| +-----+---+ | 1| 1| | 2| 2| +-----+---+ """ result = func(self) assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \ "should have been DataFrame." 
% type(result) return result @since(1.3) def toPandas(self): """ Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``. This is only available if Pandas is installed and available. .. note:: This method should only be used if the resulting Pandas's DataFrame is expected to be small, as all the data is loaded into the driver's memory. .. note:: Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental. >>> df.toPandas() # doctest: +SKIP age name 0 2 Alice 1 5 Bob """ from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() import pandas as pd if self.sql_ctx._conf.pandasRespectSessionTimeZone(): timezone = self.sql_ctx._conf.sessionLocalTimeZone() else: timezone = None if self.sql_ctx._conf.arrowPySparkEnabled(): use_arrow = True try: from pyspark.sql.types import to_arrow_schema from pyspark.sql.utils import require_minimum_pyarrow_version require_minimum_pyarrow_version() to_arrow_schema(self.schema) except Exception as e: if self.sql_ctx._conf.arrowPySparkFallbackEnabled(): msg = ( "toPandas attempted Arrow optimization because " "'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, " "failed by the reason below:\n %s\n" "Attempting non-optimization as " "'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to " "true." % _exception_message(e)) warnings.warn(msg) use_arrow = False else: msg = ( "toPandas attempted Arrow optimization because " "'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has " "reached the error below and will not continue because automatic fallback " "with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to " "false.\n %s" % _exception_message(e)) warnings.warn(msg) raise # Try to use Arrow optimization when the schema is supported and the required version # of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled. if use_arrow: try: from pyspark.sql.types import _check_dataframe_localize_timestamps import pyarrow batches = self._collectAsArrow() if len(batches) > 0: table = pyarrow.Table.from_batches(batches) # Pandas DataFrame created from PyArrow uses datetime64[ns] for date type # values, but we should use datetime.date to match the behavior with when # Arrow optimization is disabled. pdf = table.to_pandas(date_as_object=True) return _check_dataframe_localize_timestamps(pdf, timezone) else: return pd.DataFrame.from_records([], columns=self.columns) except Exception as e: # We might have to allow fallback here as well but multiple Spark jobs can # be executed. So, simply fail in this case for now. msg = ( "toPandas attempted Arrow optimization because " "'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has " "reached the error below and can not continue. Note that " "'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an " "effect on failures in the middle of " "computation.\n %s" % _exception_message(e)) warnings.warn(msg) raise # Below is toPandas without Arrow optimization. pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns) dtype = {} for field in self.schema: pandas_type = _to_corrected_pandas_type(field.dataType) # SPARK-21766: if an integer field is nullable and has null values, it can be # inferred by pandas as float column. Once we convert the column with NaN back # to integer type e.g., np.int16, we will hit exception. So we use the inferred # float type, not the corrected type from the schema in this case. 
if pandas_type is not None and \ not(isinstance(field.dataType, IntegralType) and field.nullable and pdf[field.name].isnull().any()): dtype[field.name] = pandas_type for f, t in dtype.items(): pdf[f] = pdf[f].astype(t, copy=False) if timezone is None: return pdf else: from pyspark.sql.types import _check_series_convert_timestamps_local_tz for field in self.schema: # TODO: handle nested timestamps, such as ArrayType(TimestampType())? if isinstance(field.dataType, TimestampType): pdf[field.name] = \ _check_series_convert_timestamps_local_tz(pdf[field.name], timezone) return pdf def mapInPandas(self, udf): """ Maps an iterator of batches in the current :class:`DataFrame` using a Pandas user-defined function and returns the result as a :class:`DataFrame`. The user-defined function should take an iterator of `pandas.DataFrame`\\s and return another iterator of `pandas.DataFrame`\\s. All columns are passed together as an iterator of `pandas.DataFrame`\\s to the user-defined function and the returned iterator of `pandas.DataFrame`\\s are combined as a :class:`DataFrame`. Each `pandas.DataFrame` size can be controlled by `spark.sql.execution.arrow.maxRecordsPerBatch`. Its schema must match the returnType of the Pandas user-defined function. :param udf: A function object returned by :meth:`pyspark.sql.functions.pandas_udf` >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame([(1, 21), (2, 30)], ... ("id", "age")) # doctest: +SKIP >>> @pandas_udf(df.schema, PandasUDFType.MAP_ITER) # doctest: +SKIP ... def filter_func(batch_iter): ... for pdf in batch_iter: ... yield pdf[pdf.id == 1] >>> df.mapInPandas(filter_func).show() # doctest: +SKIP +---+---+ | id|age| +---+---+ | 1| 21| +---+---+ .. seealso:: :meth:`pyspark.sql.functions.pandas_udf` """ # Columns are special because hasattr always return True if isinstance(udf, Column) or not hasattr(udf, 'func') \ or udf.evalType != PythonEvalType.SQL_MAP_PANDAS_ITER_UDF: raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type " "MAP_ITER.") udf_column = udf(*[self[col] for col in self.columns]) jdf = self._jdf.mapInPandas(udf_column._jc.expr()) return DataFrame(jdf, self.sql_ctx) def _collectAsArrow(self): """ Returns all records as a list of ArrowRecordBatches, pyarrow must be installed and available on driver and worker Python environments. .. note:: Experimental. 
""" with SCCallSiteSync(self._sc) as css: port, auth_secret, jsocket_auth_server = self._jdf.collectAsArrowToPython() # Collect list of un-ordered batches where last element is a list of correct order indices try: results = list(_load_from_socket((port, auth_secret), ArrowCollectSerializer())) finally: # Join serving thread and raise any exceptions from collectAsArrowToPython jsocket_auth_server.getResult() # Separate RecordBatches from batch order indices in results batches = results[:-1] batch_order = results[-1] # Re-order the batch list using the correct order return [batches[i] for i in batch_order] ########################################################################################## # Pandas compatibility ########################################################################################## groupby = copy_func( groupBy, sinceversion=1.4, doc=":func:`groupby` is an alias for :func:`groupBy`.") drop_duplicates = copy_func( dropDuplicates, sinceversion=1.4, doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.") where = copy_func( filter, sinceversion=1.3, doc=":func:`where` is an alias for :func:`filter`.") def _to_scala_map(sc, jm): """ Convert a dict into a JVM Map. """ return sc._jvm.PythonUtils.toScalaMap(jm) def _to_corrected_pandas_type(dt): """ When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred uncorrectly. """ import numpy as np if type(dt) == ByteType: return np.int8 elif type(dt) == ShortType: return np.int16 elif type(dt) == IntegerType: return np.int32 elif type(dt) == FloatType: return np.float32 else: return None class DataFrameNaFunctions(object): """Functionality for working with missing data in :class:`DataFrame`. .. versionadded:: 1.4 """ def __init__(self, df): self.df = df def drop(self, how='any', thresh=None, subset=None): return self.df.dropna(how=how, thresh=thresh, subset=subset) drop.__doc__ = DataFrame.dropna.__doc__ def fill(self, value, subset=None): return self.df.fillna(value=value, subset=subset) fill.__doc__ = DataFrame.fillna.__doc__ def replace(self, to_replace, value=_NoValue, subset=None): return self.df.replace(to_replace, value, subset) replace.__doc__ = DataFrame.replace.__doc__ class DataFrameStatFunctions(object): """Functionality for statistic functions with :class:`DataFrame`. .. 
versionadded:: 1.4 """ def __init__(self, df): self.df = df def approxQuantile(self, col, probabilities, relativeError): return self.df.approxQuantile(col, probabilities, relativeError) approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__ def corr(self, col1, col2, method=None): return self.df.corr(col1, col2, method) corr.__doc__ = DataFrame.corr.__doc__ def cov(self, col1, col2): return self.df.cov(col1, col2) cov.__doc__ = DataFrame.cov.__doc__ def crosstab(self, col1, col2): return self.df.crosstab(col1, col2) crosstab.__doc__ = DataFrame.crosstab.__doc__ def freqItems(self, cols, support=None): return self.df.freqItems(cols, support) freqItems.__doc__ = DataFrame.freqItems.__doc__ def sampleBy(self, col, fractions, seed=None): return self.df.sampleBy(col, fractions, seed) sampleBy.__doc__ = DataFrame.sampleBy.__doc__ def _test(): import doctest from pyspark.context import SparkContext from pyspark.sql import Row, SQLContext, SparkSession import pyspark.sql.dataframe from pyspark.sql.functions import from_unixtime globs = pyspark.sql.dataframe.__dict__.copy() sc = SparkContext('local[4]', 'PythonTest') globs['sc'] = sc globs['sqlContext'] = SQLContext(sc) globs['spark'] = SparkSession(sc) globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\ .toDF(StructType([StructField('age', IntegerType()), StructField('name', StringType())])) globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF() globs['df3'] = sc.parallelize([Row(name='Alice', age=2), Row(name='Bob', age=5)]).toDF() globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80), Row(name='Bob', age=5, height=None), Row(name='Tom', age=None, height=None), Row(name=None, age=None, height=None)]).toDF() globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10), Row(name='Bob', spy=None, age=5), Row(name='Mallory', spy=True, age=None)]).toDF() globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846), Row(name='Bob', time=1479442946)]).toDF() (failure_count, test_count) = doctest.testmod( pyspark.sql.dataframe, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF) globs['sc'].stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
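# ---------------------------------------------------------------------------
# Editor-added illustration -- NOT part of the upstream pyspark source file.
# A minimal, hedged sketch of the DataFrameStatFunctions helpers documented
# above (corr, cov, crosstab, freqItems, approxQuantile), which carry no
# doctest examples of their own. It assumes a local SparkSession can be
# started; the column names and rows are invented for the illustration only.
# The helper is defined but never called, so importing the module is
# unaffected.
# ---------------------------------------------------------------------------
def _editor_example_stat_usage():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").appName("stat-sketch").getOrCreate()
    df = spark.createDataFrame(
        [(1, 10.0, "a"), (2, 20.0, "b"), (3, 30.0, "a"), (4, 40.0, "b")],
        ["id", "score", "group"])

    # Pearson correlation and sample covariance of two numeric columns.
    df.stat.corr("id", "score")
    df.stat.cov("id", "score")

    # Pair-wise frequency (contingency) table of two columns.
    df.stat.crosstab("group", "id").show()

    # Frequent items per column, with a 50% support threshold.
    df.stat.freqItems(["group"], support=0.5).show()

    # Approximate quartiles of a numeric column with 1% relative error.
    quartiles = df.stat.approxQuantile("score", [0.25, 0.5, 0.75], 0.01)

    spark.stop()
    return quartiles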
apache-2.0
ammarkhann/FinalSeniorCode
lib/python2.7/site-packages/numpy/linalg/linalg.py
24
77839
"""Lite version of scipy.linalg. Notes ----- This module is a lite version of the linalg.py module in SciPy which contains high-level Python interface to the LAPACK library. The lite version only accesses the following LAPACK functions: dgesv, zgesv, dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. """ from __future__ import division, absolute_import, print_function __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', 'LinAlgError', 'multi_dot'] import warnings from numpy.core import ( array, asarray, zeros, empty, empty_like, transpose, intc, single, double, csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot, add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size, finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs, broadcast, atleast_2d, intp, asanyarray, isscalar, object_ ) from numpy.lib import triu, asfarray from numpy.linalg import lapack_lite, _umath_linalg from numpy.matrixlib.defmatrix import matrix_power from numpy.compat import asbytes # For Python2/3 compatibility _N = asbytes('N') _V = asbytes('V') _A = asbytes('A') _S = asbytes('S') _L = asbytes('L') fortran_int = intc # Error object class LinAlgError(Exception): """ Generic Python-exception-derived object raised by linalg functions. General purpose exception class, derived from Python's exception.Exception class, programmatically raised in linalg functions when a Linear Algebra-related condition would prevent further correct execution of the function. Parameters ---------- None Examples -------- >>> from numpy import linalg as LA >>> LA.inv(np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "...linalg.py", line 350, in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) File "...linalg.py", line 249, in solve raise LinAlgError('Singular matrix') numpy.linalg.LinAlgError: Singular matrix """ pass # Dealing with errors in _umath_linalg _linalg_error_extobj = None def _determine_error_states(): global _linalg_error_extobj errobj = geterrobj() bufsize = errobj[0] with errstate(invalid='call', over='ignore', divide='ignore', under='ignore'): invalid_call_errmask = geterrobj()[1] _linalg_error_extobj = [bufsize, invalid_call_errmask, None] _determine_error_states() def _raise_linalgerror_singular(err, flag): raise LinAlgError("Singular matrix") def _raise_linalgerror_nonposdef(err, flag): raise LinAlgError("Matrix is not positive definite") def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): raise LinAlgError("Eigenvalues did not converge") def _raise_linalgerror_svd_nonconvergence(err, flag): raise LinAlgError("SVD did not converge") def get_linalg_error_extobj(callback): extobj = list(_linalg_error_extobj) extobj[2] = callback return extobj def _makearray(a): new = asarray(a) wrap = getattr(a, "__array_prepare__", new.__array_wrap__) return new, wrap def isComplexType(t): return issubclass(t, complexfloating) _real_types_map = {single : single, double : double, csingle : single, cdouble : double} _complex_types_map = {single : csingle, double : cdouble, csingle : csingle, cdouble : cdouble} def _realType(t, default=double): return _real_types_map.get(t, default) def _complexType(t, default=cdouble): return _complex_types_map.get(t, default) def _linalgRealType(t): """Cast the type t to 
either double or cdouble.""" return double _complex_types_map = {single : csingle, double : cdouble, csingle : csingle, cdouble : cdouble} def _commonType(*arrays): # in lite version, use higher precision (always double or cdouble) result_type = single is_complex = False for a in arrays: if issubclass(a.dtype.type, inexact): if isComplexType(a.dtype.type): is_complex = True rt = _realType(a.dtype.type, default=None) if rt is None: # unsupported inexact scalar raise TypeError("array type %s is unsupported in linalg" % (a.dtype.name,)) else: rt = double if rt is double: result_type = double if is_complex: t = cdouble result_type = _complex_types_map[result_type] else: t = double return t, result_type # _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). _fastCT = fastCopyAndTranspose def _to_native_byte_order(*arrays): ret = [] for arr in arrays: if arr.dtype.byteorder not in ('=', '|'): ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) else: ret.append(arr) if len(ret) == 1: return ret[0] else: return ret def _fastCopyAndTranspose(type, *arrays): cast_arrays = () for a in arrays: if a.dtype.type is type: cast_arrays = cast_arrays + (_fastCT(a),) else: cast_arrays = cast_arrays + (_fastCT(a.astype(type)),) if len(cast_arrays) == 1: return cast_arrays[0] else: return cast_arrays def _assertRank2(*arrays): for a in arrays: if len(a.shape) != 2: raise LinAlgError('%d-dimensional array given. Array must be ' 'two-dimensional' % len(a.shape)) def _assertRankAtLeast2(*arrays): for a in arrays: if len(a.shape) < 2: raise LinAlgError('%d-dimensional array given. Array must be ' 'at least two-dimensional' % len(a.shape)) def _assertSquareness(*arrays): for a in arrays: if max(a.shape) != min(a.shape): raise LinAlgError('Array must be square') def _assertNdSquareness(*arrays): for a in arrays: if max(a.shape[-2:]) != min(a.shape[-2:]): raise LinAlgError('Last 2 dimensions of the array must be square') def _assertFinite(*arrays): for a in arrays: if not (isfinite(a).all()): raise LinAlgError("Array must not contain infs or NaNs") def _assertNoEmpty2d(*arrays): for a in arrays: if a.size == 0 and product(a.shape[-2:]) == 0: raise LinAlgError("Arrays cannot be empty") # Linear equations def tensorsolve(a, b, axes=None): """ Solve the tensor equation ``a x = b`` for x. It is assumed that all indices of `x` are summed over in the product, together with the rightmost indices of `a`, as is done in, for example, ``tensordot(a, x, axes=len(b.shape))``. Parameters ---------- a : array_like Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals the shape of that sub-tensor of `a` consisting of the appropriate number of its rightmost indices, and must be such that ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be 'square'). b : array_like Right-hand tensor, which can be of any shape. axes : tuple of ints, optional Axes in `a` to reorder to the right, before inversion. If None (default), no reordering is done. Returns ------- x : ndarray, shape Q Raises ------ LinAlgError If `a` is singular or not 'square' (in the above sense). 
See Also -------- numpy.tensordot, tensorinv, numpy.einsum Examples -------- >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) >>> b = np.random.randn(2*3, 4) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) >>> np.allclose(np.tensordot(a, x, axes=3), b) True """ a, wrap = _makearray(a) b = asarray(b) an = a.ndim if axes is not None: allaxes = list(range(0, an)) for k in axes: allaxes.remove(k) allaxes.insert(an, k) a = a.transpose(allaxes) oldshape = a.shape[-(an-b.ndim):] prod = 1 for k in oldshape: prod *= k a = a.reshape(-1, prod) b = b.ravel() res = wrap(solve(a, b)) res.shape = oldshape return res def solve(a, b): """ Solve a linear matrix equation, or system of linear scalar equations. Computes the "exact" solution, `x`, of the well-determined, i.e., full rank, linear matrix equation `ax = b`. Parameters ---------- a : (..., M, M) array_like Coefficient matrix. b : {(..., M,), (..., M, K)}, array_like Ordinate or "dependent variable" values. Returns ------- x : {(..., M,), (..., M, K)} ndarray Solution to the system a x = b. Returned shape is identical to `b`. Raises ------ LinAlgError If `a` is singular or not square. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The solutions are computed using LAPACK routine _gesv `a` must be square and of full-rank, i.e., all rows (or, equivalently, columns) must be linearly independent; if either is not true, use `lstsq` for the least-squares best "solution" of the system/equation. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pg. 22. Examples -------- Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``: >>> a = np.array([[3,1], [1,2]]) >>> b = np.array([9,8]) >>> x = np.linalg.solve(a, b) >>> x array([ 2., 3.]) Check that the solution is correct: >>> np.allclose(np.dot(a, x), b) True """ a, _ = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) b, wrap = _makearray(b) t, result_t = _commonType(a, b) # We use the b = (..., M,) logic, only if the number of extra dimensions # match exactly if b.ndim == a.ndim - 1: if a.shape[-1] == 0 and b.shape[-1] == 0: # Legal, but the ufunc cannot handle the 0-sized inner dims # let the ufunc handle all wrong cases. a = a.reshape(a.shape[:-1]) bc = broadcast(a, b) return wrap(empty(bc.shape, dtype=result_t)) gufunc = _umath_linalg.solve1 else: if b.size == 0: if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0: a = a[:,:1].reshape(a.shape[:-1] + (1,)) bc = broadcast(a, b) return wrap(empty(bc.shape, dtype=result_t)) gufunc = _umath_linalg.solve signature = 'DD->D' if isComplexType(t) else 'dd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_singular) r = gufunc(a, b, signature=signature, extobj=extobj) return wrap(r.astype(result_t, copy=False)) def tensorinv(a, ind=2): """ Compute the 'inverse' of an N-dimensional array. The result is an inverse for `a` relative to the tensordot operation ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the tensordot operation. Parameters ---------- a : array_like Tensor to 'invert'. Its shape must be 'square', i. e., ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. ind : int, optional Number of first indices that are involved in the inverse sum. Must be a positive integer, default is 2. Returns ------- b : ndarray `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. 
Raises ------ LinAlgError If `a` is singular or not 'square' (in the above sense). See Also -------- numpy.tensordot, tensorsolve Examples -------- >>> a = np.eye(4*6) >>> a.shape = (4, 6, 8, 3) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) >>> b = np.random.randn(4, 6) >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True >>> a = np.eye(4*6) >>> a.shape = (24, 8, 3) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) >>> b = np.random.randn(24) >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) True """ a = asarray(a) oldshape = a.shape prod = 1 if ind > 0: invshape = oldshape[ind:] + oldshape[:ind] for k in oldshape[ind:]: prod *= k else: raise ValueError("Invalid ind argument.") a = a.reshape(prod, -1) ia = inv(a) return ia.reshape(*invshape) # Matrix inversion def inv(a): """ Compute the (multiplicative) inverse of a matrix. Given a square matrix `a`, return the matrix `ainv` satisfying ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. Parameters ---------- a : (..., M, M) array_like Matrix to be inverted. Returns ------- ainv : (..., M, M) ndarray or matrix (Multiplicative) inverse of the matrix `a`. Raises ------ LinAlgError If `a` is not square or inversion fails. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. Examples -------- >>> from numpy.linalg import inv >>> a = np.array([[1., 2.], [3., 4.]]) >>> ainv = inv(a) >>> np.allclose(np.dot(a, ainv), np.eye(2)) True >>> np.allclose(np.dot(ainv, a), np.eye(2)) True If a is a matrix object, then the return value is a matrix as well: >>> ainv = inv(np.matrix(a)) >>> ainv matrix([[-2. , 1. ], [ 1.5, -0.5]]) Inverses of several matrices can be computed at once: >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) >>> inv(a) array([[[-2. , 1. ], [ 1.5, -0.5]], [[-5. , 2. ], [ 3. , -1. ]]]) """ a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) if a.shape[-1] == 0: # The inner array is 0x0, the ufunc cannot handle this case return wrap(empty_like(a, dtype=result_t)) signature = 'D->D' if isComplexType(t) else 'd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_singular) ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) return wrap(ainv.astype(result_t, copy=False)) # Cholesky decomposition def cholesky(a): """ Cholesky decomposition. Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, where `L` is lower-triangular and .H is the conjugate transpose operator (which is the ordinary transpose if `a` is real-valued). `a` must be Hermitian (symmetric if real-valued) and positive-definite. Only `L` is actually returned. Parameters ---------- a : (..., M, M) array_like Hermitian (symmetric if all elements are real), positive-definite input matrix. Returns ------- L : (..., M, M) array_like Upper or lower-triangular Cholesky factor of `a`. Returns a matrix object if `a` is a matrix object. Raises ------ LinAlgError If the decomposition fails, for example, if `a` is not positive-definite. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The Cholesky decomposition is often used as a fast way of solving .. math:: A \\mathbf{x} = \\mathbf{b} (when `A` is both Hermitian/symmetric and positive-definite). First, we solve for :math:`\\mathbf{y}` in .. math:: L \\mathbf{y} = \\mathbf{b}, and then for :math:`\\mathbf{x}` in .. math:: L.H \\mathbf{x} = \\mathbf{y}. 
Examples -------- >>> A = np.array([[1,-2j],[2j,5]]) >>> A array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> L = np.linalg.cholesky(A) >>> L array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> np.dot(L, L.T.conj()) # verify that L * L.H = A array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? >>> np.linalg.cholesky(A) # an ndarray object is returned array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> # But a matrix object is returned if A is a matrix object >>> LA.cholesky(np.matrix(A)) matrix([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) """ extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef) gufunc = _umath_linalg.cholesky_lo a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' r = gufunc(a, signature=signature, extobj=extobj) return wrap(r.astype(result_t, copy=False)) # QR decompostion def qr(a, mode='reduced'): """ Compute the qr factorization of a matrix. Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is upper-triangular. Parameters ---------- a : array_like, shape (M, N) Matrix to be factored. mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional If K = min(M, N), then 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) 'complete' : returns q, r with dimensions (M, M), (M, N) 'r' : returns r only with dimensions (K, N) 'raw' : returns h, tau with dimensions (N, M), (K,) 'full' : alias of 'reduced', deprecated 'economic' : returns h from 'raw', deprecated. The options 'reduced', 'complete, and 'raw' are new in numpy 1.8, see the notes for more information. The default is 'reduced' and to maintain backward compatibility with earlier versions of numpy both it and the old default 'full' can be omitted. Note that array h returned in 'raw' mode is transposed for calling Fortran. The 'economic' mode is deprecated. The modes 'full' and 'economic' may be passed using only the first letter for backwards compatibility, but all others must be spelled out. See the Notes for more explanation. Returns ------- q : ndarray of float or complex, optional A matrix with orthonormal columns. When mode = 'complete' the result is an orthogonal/unitary matrix depending on whether or not a is real/complex. The determinant may be either +/- 1 in that case. r : ndarray of float or complex, optional The upper-triangular matrix. (h, tau) : ndarrays of np.double or np.cdouble, optional The array h contains the Householder reflectors that generate q along with r. The tau array contains scaling factors for the reflectors. In the deprecated 'economic' mode only h is returned. Raises ------ LinAlgError If factoring fails. Notes ----- This is an interface to the LAPACK routines dgeqrf, zgeqrf, dorgqr, and zungqr. For more information on the qr factorization, see for example: http://en.wikipedia.org/wiki/QR_factorization Subclasses of `ndarray` are preserved except for the 'raw' mode. So if `a` is of type `matrix`, all the return values will be matrices too. New 'reduced', 'complete', and 'raw' options for mode were added in NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In addition the options 'full' and 'economic' were deprecated. Because 'full' was the previous default and 'reduced' is the new default, backward compatibility can be maintained by letting `mode` default. 
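    For illustration, the shapes returned by the 'reduced' and 'complete'
    modes for an arbitrary 9x6 input are:

    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a)                  # mode='reduced' (default)
    >>> q.shape, r.shape
    ((9, 6), (6, 6))
    >>> q, r = np.linalg.qr(a, mode='complete')
    >>> q.shape, r.shape
    ((9, 9), (9, 6))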
The 'raw' option was added so that LAPACK routines that can multiply arrays by q using the Householder reflectors can be used. Note that in this case the returned arrays are of type np.double or np.cdouble and the h array is transposed to be FORTRAN compatible. No routines using the 'raw' return are currently exposed by numpy, but some are available in lapack_lite and just await the necessary work. Examples -------- >>> a = np.random.randn(9, 6) >>> q, r = np.linalg.qr(a) >>> np.allclose(a, np.dot(q, r)) # a does equal qr True >>> r2 = np.linalg.qr(a, mode='r') >>> r3 = np.linalg.qr(a, mode='economic') >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' True >>> # But only triu parts are guaranteed equal when mode='economic' >>> np.allclose(r, np.triu(r3[:6,:6], k=0)) True Example illustrating a common use of `qr`: solving of least squares problems What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points and you'll see that it should be y0 = 0, m = 1.) The answer is provided by solving the over-determined matrix equation ``Ax = b``, where:: A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) x = array([[y0], [m]]) b = array([[1], [0], [2], [1]]) If A = qr such that q is orthonormal (which is always possible via Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, however, we simply use `lstsq`.) >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) >>> A array([[0, 1], [1, 1], [1, 1], [2, 1]]) >>> b = np.array([1, 0, 2, 1]) >>> q, r = LA.qr(A) >>> p = np.dot(q.T, b) >>> np.dot(LA.inv(r), p) array([ 1.1e-16, 1.0e+00]) """ if mode not in ('reduced', 'complete', 'r', 'raw'): if mode in ('f', 'full'): # 2013-04-01, 1.8 msg = "".join(( "The 'full' option is deprecated in favor of 'reduced'.\n", "For backward compatibility let mode default.")) warnings.warn(msg, DeprecationWarning, stacklevel=2) mode = 'reduced' elif mode in ('e', 'economic'): # 2013-04-01, 1.8 msg = "The 'economic' option is deprecated." 
warnings.warn(msg, DeprecationWarning, stacklevel=2) mode = 'economic' else: raise ValueError("Unrecognized mode '%s'" % mode) a, wrap = _makearray(a) _assertRank2(a) _assertNoEmpty2d(a) m, n = a.shape t, result_t = _commonType(a) a = _fastCopyAndTranspose(t, a) a = _to_native_byte_order(a) mn = min(m, n) tau = zeros((mn,), t) if isComplexType(t): lapack_routine = lapack_lite.zgeqrf routine_name = 'zgeqrf' else: lapack_routine = lapack_lite.dgeqrf routine_name = 'dgeqrf' # calculate optimal size of work data 'work' lwork = 1 work = zeros((lwork,), t) results = lapack_routine(m, n, a, m, tau, work, -1, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # do qr decomposition lwork = int(abs(work[0])) work = zeros((lwork,), t) results = lapack_routine(m, n, a, m, tau, work, lwork, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # handle modes that don't return q if mode == 'r': r = _fastCopyAndTranspose(result_t, a[:, :mn]) return wrap(triu(r)) if mode == 'raw': return a, tau if mode == 'economic': if t != result_t : a = a.astype(result_t, copy=False) return wrap(a.T) # generate q from a if mode == 'complete' and m > n: mc = m q = empty((m, m), t) else: mc = mn q = empty((n, m), t) q[:n] = a if isComplexType(t): lapack_routine = lapack_lite.zungqr routine_name = 'zungqr' else: lapack_routine = lapack_lite.dorgqr routine_name = 'dorgqr' # determine optimal lwork lwork = 1 work = zeros((lwork,), t) results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # compute q lwork = int(abs(work[0])) work = zeros((lwork,), t) results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) q = _fastCopyAndTranspose(result_t, q[:mc]) r = _fastCopyAndTranspose(result_t, a[:, :mc]) return wrap(q), wrap(triu(r)) # Eigenvalues def eigvals(a): """ Compute the eigenvalues of a general matrix. Main difference between `eigvals` and `eig`: the eigenvectors aren't returned. Parameters ---------- a : (..., M, M) array_like A complex- or real-valued matrix whose eigenvalues will be computed. Returns ------- w : (..., M,) ndarray The eigenvalues, each repeated according to its multiplicity. They are not necessarily ordered, nor are they necessarily real for real matrices. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eig : eigenvalues and right eigenvectors of general arrays eigvalsh : eigenvalues of symmetric or Hermitian arrays. eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. This is implemented using the _geev LAPACK routines which compute the eigenvalues and eigenvectors of general square arrays. Examples -------- Illustration, using the fact that the eigenvalues of a diagonal matrix are its diagonal elements, that multiplying a matrix on the left by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose of `Q`), preserves the eigenvalues of the "middle" matrix. 
In other words, if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as ``A``: >>> from numpy import linalg as LA >>> x = np.random.random() >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) (1.0, 1.0, 0.0) Now multiply a diagonal matrix by Q on one side and by Q.T on the other: >>> D = np.diag((-1,1)) >>> LA.eigvals(D) array([-1., 1.]) >>> A = np.dot(Q, D) >>> A = np.dot(A, Q.T) >>> LA.eigvals(A) array([ 1., -1.]) """ a, wrap = _makearray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) _assertNdSquareness(a) _assertFinite(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) signature = 'D->D' if isComplexType(t) else 'd->D' w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) if not isComplexType(t): if all(w.imag == 0): w = w.real result_t = _realType(result_t) else: result_t = _complexType(result_t) return w.astype(result_t, copy=False) def eigvalsh(a, UPLO='L'): """ Compute the eigenvalues of a Hermitian or real symmetric matrix. Main difference from eigh: the eigenvectors are not computed. Parameters ---------- a : (..., M, M) array_like A complex- or real-valued matrix whose eigenvalues are to be computed. UPLO : {'L', 'U'}, optional Specifies whether the calculation is done with the lower triangular part of `a` ('L', default) or the upper triangular part ('U'). Irrespective of this value only the real parts of the diagonal will be considered in the computation to preserve the notion of a Hermitian matrix. It therefore follows that the imaginary part of the diagonal will always be treated as zero. Returns ------- w : (..., M,) ndarray The eigenvalues in ascending order, each repeated according to its multiplicity. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. eigvals : eigenvalues of general real or complex arrays. eig : eigenvalues and right eigenvectors of general real or complex arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. 
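    For example, broadcasting allows the eigenvalues of a stack of Hermitian
    matrices to be computed in a single call (arbitrary diagonal matrices are
    used here so the expected output is obvious):

    >>> a = np.array([[[2., 0.], [0., 3.]],
    ...               [[1., 0.], [0., 4.]]])
    >>> np.linalg.eigvalsh(a)
    array([[ 2.,  3.],
           [ 1.,  4.]])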
The eigenvalues are computed using LAPACK routines _syevd, _heevd Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> LA.eigvalsh(a) array([ 0.17157288, 5.82842712]) >>> # demonstrate the treatment of the imaginary part of the diagonal >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) >>> a array([[ 5.+2.j, 9.-2.j], [ 0.+2.j, 2.-1.j]]) >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() >>> # with: >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) >>> b array([[ 5.+0.j, 0.-2.j], [ 0.+2.j, 2.+0.j]]) >>> wa = LA.eigvalsh(a) >>> wb = LA.eigvals(b) >>> wa; wb array([ 1., 6.]) array([ 6.+0.j, 1.+0.j]) """ UPLO = UPLO.upper() if UPLO not in ('L', 'U'): raise ValueError("UPLO argument must be 'L' or 'U'") extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) if UPLO == 'L': gufunc = _umath_linalg.eigvalsh_lo else: gufunc = _umath_linalg.eigvalsh_up a, wrap = _makearray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->d' if isComplexType(t) else 'd->d' w = gufunc(a, signature=signature, extobj=extobj) return w.astype(_realType(result_t), copy=False) def _convertarray(a): t, result_t = _commonType(a) a = _fastCT(a.astype(t)) return a, t, result_t # Eigenvectors def eig(a): """ Compute the eigenvalues and right eigenvectors of a square array. Parameters ---------- a : (..., M, M) array Matrices for which the eigenvalues and right eigenvectors will be computed Returns ------- w : (..., M) array The eigenvalues, each repeated according to its multiplicity. The eigenvalues are not necessarily ordered. The resulting array will be of complex type, unless the imaginary part is zero in which case it will be cast to a real type. When `a` is real the resulting eigenvalues will be real (0 imaginary part) or occur in conjugate pairs v : (..., M, M) array The normalized (unit "length") eigenvectors, such that the column ``v[:,i]`` is the eigenvector corresponding to the eigenvalue ``w[i]``. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigvals : eigenvalues of a non-symmetric array. eigh : eigenvalues and eigenvectors of a symmetric or Hermitian (conjugate symmetric) array. eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric) array. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. This is implemented using the _geev LAPACK routines which compute the eigenvalues and eigenvectors of general square arrays. The number `w` is an eigenvalue of `a` if there exists a vector `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`. The array `v` of eigenvectors may not be of maximum rank, that is, some of the columns may be linearly dependent, although round-off error may obscure that fact. If the eigenvalues are all different, then theoretically the eigenvectors are linearly independent. Likewise, the (complex-valued) matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e., if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate transpose of `a`. Finally, it is emphasized that `v` consists of the *right* (as in right-hand side) eigenvectors of `a`. 
A vector `y` satisfying ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left* eigenvector of `a`, and, in general, the left and right eigenvectors of a matrix are not necessarily the (perhaps conjugate) transposes of each other. References ---------- G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, Various pp. Examples -------- >>> from numpy import linalg as LA (Almost) trivial example with real e-values and e-vectors. >>> w, v = LA.eig(np.diag((1, 2, 3))) >>> w; v array([ 1., 2., 3.]) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) Real matrix possessing complex e-values and e-vectors; note that the e-values are complex conjugates of each other. >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) >>> w; v array([ 1. + 1.j, 1. - 1.j]) array([[ 0.70710678+0.j , 0.70710678+0.j ], [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]) Complex-valued matrix with real e-values (but complex-valued e-vectors); note that a.conj().T = a, i.e., a is Hermitian. >>> a = np.array([[1, 1j], [-1j, 1]]) >>> w, v = LA.eig(a) >>> w; v array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0} array([[ 0.00000000+0.70710678j, 0.70710678+0.j ], [ 0.70710678+0.j , 0.00000000+0.70710678j]]) Be careful about round-off error! >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) >>> # Theor. e-values are 1 +/- 1e-9 >>> w, v = LA.eig(a) >>> w; v array([ 1., 1.]) array([[ 1., 0.], [ 0., 1.]]) """ a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) _assertFinite(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) signature = 'D->DD' if isComplexType(t) else 'd->DD' w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) if not isComplexType(t) and all(w.imag == 0.0): w = w.real vt = vt.real result_t = _realType(result_t) else: result_t = _complexType(result_t) vt = vt.astype(result_t, copy=False) return w.astype(result_t, copy=False), wrap(vt) def eigh(a, UPLO='L'): """ Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix. Returns two objects, a 1-D array containing the eigenvalues of `a`, and a 2-D square array or matrix (depending on the input type) of the corresponding eigenvectors (in columns). Parameters ---------- a : (..., M, M) array Hermitian/Symmetric matrices whose eigenvalues and eigenvectors are to be computed. UPLO : {'L', 'U'}, optional Specifies whether the calculation is done with the lower triangular part of `a` ('L', default) or the upper triangular part ('U'). Irrespective of this value only the real parts of the diagonal will be considered in the computation to preserve the notion of a Hermitian matrix. It therefore follows that the imaginary part of the diagonal will always be treated as zero. Returns ------- w : (..., M) ndarray The eigenvalues in ascending order, each repeated according to its multiplicity. v : {(..., M, M) ndarray, (..., M, M) matrix} The column ``v[:, i]`` is the normalized eigenvector corresponding to the eigenvalue ``w[i]``. Will return a matrix object if `a` is a matrix object. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigvalsh : eigenvalues of symmetric or Hermitian arrays. eig : eigenvalues and right eigenvectors for non-symmetric arrays. eigvals : eigenvalues of non-symmetric arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. 
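    Since the columns of `v` are orthonormal eigenvectors, `a` can be
    reconstructed from `w` and `v`; a small check with an arbitrary symmetric
    matrix:

    >>> a = np.array([[2., 1.], [1., 2.]])
    >>> w, v = np.linalg.eigh(a)
    >>> np.allclose(a, np.dot(v, np.dot(np.diag(w), v.conj().T)))
    True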
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd, _heevd The eigenvalues of real symmetric or complex Hermitian matrices are always real. [1]_ The array `v` of (column) eigenvectors is unitary and `a`, `w`, and `v` satisfy the equations ``dot(a, v[:, i]) = w[i] * v[:, i]``. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pg. 222. Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> a array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> w, v = LA.eigh(a) >>> w; v array([ 0.17157288, 5.82842712]) array([[-0.92387953+0.j , -0.38268343+0.j ], [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j]) >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair array([ 0.+0.j, 0.+0.j]) >>> A = np.matrix(a) # what happens if input is a matrix object >>> A matrix([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> w, v = LA.eigh(A) >>> w; v array([ 0.17157288, 5.82842712]) matrix([[-0.92387953+0.j , -0.38268343+0.j ], [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) >>> # demonstrate the treatment of the imaginary part of the diagonal >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) >>> a array([[ 5.+2.j, 9.-2.j], [ 0.+2.j, 2.-1.j]]) >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) >>> b array([[ 5.+0.j, 0.-2.j], [ 0.+2.j, 2.+0.j]]) >>> wa, va = LA.eigh(a) >>> wb, vb = LA.eig(b) >>> wa; wb array([ 1., 6.]) array([ 6.+0.j, 1.+0.j]) >>> va; vb array([[-0.44721360-0.j , -0.89442719+0.j ], [ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]]) array([[ 0.89442719+0.j , 0.00000000-0.4472136j], [ 0.00000000-0.4472136j, 0.89442719+0.j ]]) """ UPLO = UPLO.upper() if UPLO not in ('L', 'U'): raise ValueError("UPLO argument must be 'L' or 'U'") a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) if UPLO == 'L': gufunc = _umath_linalg.eigh_lo else: gufunc = _umath_linalg.eigh_up signature = 'D->dD' if isComplexType(t) else 'd->dd' w, vt = gufunc(a, signature=signature, extobj=extobj) w = w.astype(_realType(result_t), copy=False) vt = vt.astype(result_t, copy=False) return w, wrap(vt) # Singular value decomposition def svd(a, full_matrices=1, compute_uv=1): """ Singular Value Decomposition. Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v` are unitary and `s` is a 1-d array of `a`'s singular values. Parameters ---------- a : (..., M, N) array_like A real or complex matrix of shape (`M`, `N`) . full_matrices : bool, optional If True (default), `u` and `v` have the shapes (`M`, `M`) and (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`) and (`K`, `N`), respectively, where `K` = min(`M`, `N`). compute_uv : bool, optional Whether or not to compute `u` and `v` in addition to `s`. True by default. Returns ------- u : { (..., M, M), (..., M, K) } array Unitary matrices. The actual shape depends on the value of ``full_matrices``. Only returned when ``compute_uv`` is True. s : (..., K) array The singular values for every matrix, sorted in descending order. v : { (..., N, N), (..., K, N) } array Unitary matrices. The actual shape depends on the value of ``full_matrices``. Only returned when ``compute_uv`` is True. 
Raises ------ LinAlgError If SVD computation does not converge. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The decomposition is performed using LAPACK routine _gesdd The SVD is commonly written as ``a = U S V.H``. The `v` returned by this function is ``V.H`` and ``u = U``. If ``U`` is a unitary matrix, it means that it satisfies ``U.H = inv(U)``. The rows of `v` are the eigenvectors of ``a.H a``. The columns of `u` are the eigenvectors of ``a a.H``. For row ``i`` in `v` and column ``i`` in `u`, the corresponding eigenvalue is ``s[i]**2``. If `a` is a `matrix` object (as opposed to an `ndarray`), then so are all the return values. Examples -------- >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) Reconstruction based on full SVD: >>> U, s, V = np.linalg.svd(a, full_matrices=True) >>> U.shape, V.shape, s.shape ((9, 9), (6, 6), (6,)) >>> S = np.zeros((9, 6), dtype=complex) >>> S[:6, :6] = np.diag(s) >>> np.allclose(a, np.dot(U, np.dot(S, V))) True Reconstruction based on reduced SVD: >>> U, s, V = np.linalg.svd(a, full_matrices=False) >>> U.shape, V.shape, s.shape ((9, 6), (6, 6), (6,)) >>> S = np.diag(s) >>> np.allclose(a, np.dot(U, np.dot(S, V))) True """ a, wrap = _makearray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) m = a.shape[-2] n = a.shape[-1] if compute_uv: if full_matrices: if m < n: gufunc = _umath_linalg.svd_m_f else: gufunc = _umath_linalg.svd_n_f else: if m < n: gufunc = _umath_linalg.svd_m_s else: gufunc = _umath_linalg.svd_n_s signature = 'D->DdD' if isComplexType(t) else 'd->ddd' u, s, vt = gufunc(a, signature=signature, extobj=extobj) u = u.astype(result_t, copy=False) s = s.astype(_realType(result_t), copy=False) vt = vt.astype(result_t, copy=False) return wrap(u), s, wrap(vt) else: if m < n: gufunc = _umath_linalg.svd_m else: gufunc = _umath_linalg.svd_n signature = 'D->d' if isComplexType(t) else 'd->d' s = gufunc(a, signature=signature, extobj=extobj) s = s.astype(_realType(result_t), copy=False) return s def cond(x, p=None): """ Compute the condition number of a matrix. This function is capable of returning the condition number using one of seven different norms, depending on the value of `p` (see Parameters below). Parameters ---------- x : (..., M, N) array_like The matrix whose condition number is sought. p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional Order of the norm: ===== ============================ p norm for matrices ===== ============================ None 2-norm, computed directly using the ``SVD`` 'fro' Frobenius norm inf max(sum(abs(x), axis=1)) -inf min(sum(abs(x), axis=1)) 1 max(sum(abs(x), axis=0)) -1 min(sum(abs(x), axis=0)) 2 2-norm (largest sing. value) -2 smallest singular value ===== ============================ inf means the numpy.inf object, and the Frobenius norm is the root-of-sum-of-squares norm. Returns ------- c : {float, inf} The condition number of the matrix. May be infinite. See Also -------- numpy.linalg.norm Notes ----- The condition number of `x` is defined as the norm of `x` times the norm of the inverse of `x` [1]_; the norm can be the usual L2-norm (root-of-sum-of-squares) or one of a number of other matrix norms. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, Academic Press, Inc., 1980, pg. 285. 
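    For the default ``p=None`` the condition number reduces to the ratio of
    the largest to the smallest singular value, which can be verified
    directly (arbitrary diagonal example):

    >>> a = np.array([[1., 0.], [0., 3.]])
    >>> s = np.linalg.svd(a, compute_uv=False)
    >>> s[0] / s[-1]
    3.0
    >>> np.linalg.cond(a)
    3.0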
Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) >>> a array([[ 1, 0, -1], [ 0, 1, 0], [ 1, 0, 1]]) >>> LA.cond(a) 1.4142135623730951 >>> LA.cond(a, 'fro') 3.1622776601683795 >>> LA.cond(a, np.inf) 2.0 >>> LA.cond(a, -np.inf) 1.0 >>> LA.cond(a, 1) 2.0 >>> LA.cond(a, -1) 1.0 >>> LA.cond(a, 2) 1.4142135623730951 >>> LA.cond(a, -2) 0.70710678118654746 >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0)) 0.70710678118654746 """ x = asarray(x) # in case we have a matrix if p is None: s = svd(x, compute_uv=False) return s[..., 0]/s[..., -1] else: return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1)) def matrix_rank(M, tol=None): """ Return matrix rank of array using SVD method Rank of the array is the number of SVD singular values of the array that are greater than `tol`. Parameters ---------- M : {(M,), (M, N)} array_like array of <=2 dimensions tol : {None, float}, optional threshold below which SVD values are considered zero. If `tol` is None, and ``S`` is an array with singular values for `M`, and ``eps`` is the epsilon value for datatype of ``S``, then `tol` is set to ``S.max() * max(M.shape) * eps``. Notes ----- The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `M`. By default, we identify singular values less than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with the symbols defined above). This is the algorithm MATLAB uses [1]. It also appears in *Numerical recipes* in the discussion of SVD solutions for linear least squares [2]. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. Imagine that there is a column in `M` that is an exact (in floating point) linear combination of other columns in `M`. Computing the SVD on `M` will not produce a singular value exactly equal to 0 in general: any difference of the smallest SVD value from 0 will be caused by numerical imprecision in the calculation of the SVD. Our threshold for small SVD values takes this numerical imprecision into account, and the default threshold will detect such numerical rank deficiency. The threshold may declare a matrix `M` rank deficient even if the linear combination of some columns of `M` is not exactly equal to another column of `M` but only numerically very close to another column of `M`. We chose our default threshold because it is in wide use. Other thresholds are possible. For example, elsewhere in the 2007 edition of *Numerical recipes* there is an alternative threshold of ``S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe this threshold as being based on "expected roundoff error" (p 71). The thresholds above deal with floating point roundoff error in the calculation of the SVD. However, you may have more information about the sources of error in `M` that would make you consider other tolerance values to detect *effective* rank deficiency. The most useful measure of the tolerance depends on the operations you intend to use on your matrix. For example, if your data come from uncertain measurements with uncertainties greater than floating point epsilon, choosing a tolerance near that uncertainty may be preferable. The tolerance may be absolute if the uncertainties are absolute rather than relative. References ---------- .. [1] MATLAB reference documention, "Rank" http://www.mathworks.com/help/techdoc/ref/rank.html .. [2] W. H. Press, S. A. Teukolsky, W. T. 
Vetterling and B. P. Flannery, "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, page 795. Examples -------- >>> from numpy.linalg import matrix_rank >>> matrix_rank(np.eye(4)) # Full rank matrix 4 >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix >>> matrix_rank(I) 3 >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 1 >>> matrix_rank(np.zeros((4,))) 0 """ M = asarray(M) if M.ndim > 2: raise TypeError('array should have 2 or fewer dimensions') if M.ndim < 2: return int(not all(M==0)) S = svd(M, compute_uv=False) if tol is None: tol = S.max() * max(M.shape) * finfo(S.dtype).eps return sum(S > tol) # Generalized inverse def pinv(a, rcond=1e-15 ): """ Compute the (Moore-Penrose) pseudo-inverse of a matrix. Calculate the generalized inverse of a matrix using its singular-value decomposition (SVD) and including all *large* singular values. Parameters ---------- a : (M, N) array_like Matrix to be pseudo-inverted. rcond : float Cutoff for small singular values. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Returns ------- B : (N, M) ndarray The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so is `B`. Raises ------ LinAlgError If the SVD computation does not converge. Notes ----- The pseudo-inverse of a matrix A, denoted :math:`A^+`, is defined as: "the matrix that 'solves' [the least-squares problem] :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular value decomposition of A, then :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting of A's so-called singular values, (followed, typically, by zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix consisting of the reciprocals of A's singular values (again, followed by zeros). [1]_ References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pp. 139-142. Examples -------- The following example checks that ``a * a+ * a == a`` and ``a+ * a * a+ == a+``: >>> a = np.random.randn(9, 6) >>> B = np.linalg.pinv(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a, wrap = _makearray(a) _assertNoEmpty2d(a) a = a.conjugate() u, s, vt = svd(a, 0) m = u.shape[0] n = vt.shape[1] cutoff = rcond*maximum.reduce(s) for i in range(min(n, m)): if s[i] > cutoff: s[i] = 1./s[i] else: s[i] = 0. res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u))) return wrap(res) # Determinant def slogdet(a): """ Compute the sign and (natural) logarithm of the determinant of an array. If an array has a very small or very large determinant, then a call to `det` may overflow or underflow. This routine is more robust against such issues, because it computes the logarithm of the determinant rather than the determinant itself. Parameters ---------- a : (..., M, M) array_like Input array, has to be a square 2-D array. Returns ------- sign : (...) array_like A number representing the sign of the determinant. For a real matrix, this is 1, 0, or -1. For a complex matrix, this is a complex number with absolute value 1 (i.e., it is on the unit circle), or else 0. logdet : (...) array_like The natural log of the absolute value of the determinant. If the determinant is zero, then `sign` will be 0 and `logdet` will be -Inf. 
In all cases, the determinant is equal to ``sign * np.exp(logdet)``. See Also -------- det Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. .. versionadded:: 1.6.0 The determinant is computed via LU factorization using the LAPACK routine z/dgetrf. Examples -------- The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: >>> a = np.array([[1, 2], [3, 4]]) >>> (sign, logdet) = np.linalg.slogdet(a) >>> (sign, logdet) (-1, 0.69314718055994529) >>> sign * np.exp(logdet) -2.0 Computing log-determinants for a stack of matrices: >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) >>> a.shape (3, 2, 2) >>> sign, logdet = np.linalg.slogdet(a) >>> (sign, logdet) (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) >>> sign * np.exp(logdet) array([-2., -3., -8.]) This routine succeeds where ordinary `det` does not: >>> np.linalg.det(np.eye(500) * 0.1) 0.0 >>> np.linalg.slogdet(np.eye(500) * 0.1) (1, -1151.2925464970228) """ a = asarray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) real_t = _realType(result_t) signature = 'D->Dd' if isComplexType(t) else 'd->dd' sign, logdet = _umath_linalg.slogdet(a, signature=signature) if isscalar(sign): sign = sign.astype(result_t) else: sign = sign.astype(result_t, copy=False) if isscalar(logdet): logdet = logdet.astype(real_t) else: logdet = logdet.astype(real_t, copy=False) return sign, logdet def det(a): """ Compute the determinant of an array. Parameters ---------- a : (..., M, M) array_like Input array to compute determinants for. Returns ------- det : (...) array_like Determinant of `a`. See Also -------- slogdet : Another way to representing the determinant, more suitable for large matrices where underflow/overflow may occur. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The determinant is computed via LU factorization using the LAPACK routine z/dgetrf. Examples -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: >>> a = np.array([[1, 2], [3, 4]]) >>> np.linalg.det(a) -2.0 Computing determinants for a stack of matrices: >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) >>> a.shape (3, 2, 2) >>> np.linalg.det(a) array([-2., -3., -8.]) """ a = asarray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' r = _umath_linalg.det(a, signature=signature) if isscalar(r): r = r.astype(result_t) else: r = r.astype(result_t, copy=False) return r # Linear Least Squares def lstsq(a, b, rcond=-1): """ Return the least-squares solution to a linear matrix equation. Solves the equation `a x = b` by computing a vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may be under-, well-, or over- determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the "exact" solution of the equation. Parameters ---------- a : (M, N) array_like "Coefficient" matrix. b : {(M,), (M, K)} array_like Ordinate or "dependent variable" values. If `b` is two-dimensional, the least-squares solution is calculated for each of the `K` columns of `b`. rcond : float, optional Cut-off ratio for small singular values of `a`. 
For the purposes of rank determination, singular values are treated as zero if they are smaller than `rcond` times the largest singular value of `a`. Returns ------- x : {(N,), (N, K)} ndarray Least-squares solution. If `b` is two-dimensional, the solutions are in the `K` columns of `x`. residuals : {(), (1,), (K,)} ndarray Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. If the rank of `a` is < N or M <= N, this is an empty array. If `b` is 1-dimensional, this is a (1,) shape array. Otherwise the shape is (K,). rank : int Rank of matrix `a`. s : (min(M, N),) ndarray Singular values of `a`. Raises ------ LinAlgError If computation does not converge. Notes ----- If `b` is a matrix, then all array results are returned as matrices. Examples -------- Fit a line, ``y = mx + c``, through some noisy data-points: >>> x = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) By examining the coefficients, we see that the line should have a gradient of roughly 1 and cut the y-axis at, more or less, -1. We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`: >>> A = np.vstack([x, np.ones(len(x))]).T >>> A array([[ 0., 1.], [ 1., 1.], [ 2., 1.], [ 3., 1.]]) >>> m, c = np.linalg.lstsq(A, y)[0] >>> print(m, c) 1.0 -0.95 Plot the data along with the fitted line: >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o', label='Original data', markersize=10) >>> plt.plot(x, m*x + c, 'r', label='Fitted line') >>> plt.legend() >>> plt.show() """ import math a, _ = _makearray(a) b, wrap = _makearray(b) is_1d = len(b.shape) == 1 if is_1d: b = b[:, newaxis] _assertRank2(a, b) m = a.shape[0] n = a.shape[1] n_rhs = b.shape[1] ldb = max(n, m) if m != b.shape[0]: raise LinAlgError('Incompatible dimensions') t, result_t = _commonType(a, b) result_real_t = _realType(result_t) real_t = _linalgRealType(t) bstar = zeros((ldb, n_rhs), t) bstar[:b.shape[0], :n_rhs] = b.copy() a, bstar = _fastCopyAndTranspose(t, a, bstar) a, bstar = _to_native_byte_order(a, bstar) s = zeros((min(m, n),), real_t) nlvl = max( 0, int( math.log( float(min(m, n))/2. 
) ) + 1 ) iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int) if isComplexType(t): lapack_routine = lapack_lite.zgelsd lwork = 1 rwork = zeros((lwork,), real_t) work = zeros((lwork,), t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, rwork, iwork, 0) lwork = int(abs(work[0])) rwork = zeros((lwork,), real_t) a_real = zeros((m, n), real_t) bstar_real = zeros((ldb, n_rhs,), real_t) results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m, bstar_real, ldb, s, rcond, 0, rwork, -1, iwork, 0) lrwork = int(rwork[0]) work = zeros((lwork,), t) rwork = zeros((lrwork,), real_t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, lwork, rwork, iwork, 0) else: lapack_routine = lapack_lite.dgelsd lwork = 1 work = zeros((lwork,), t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, iwork, 0) lwork = int(work[0]) work = zeros((lwork,), t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, lwork, iwork, 0) if results['info'] > 0: raise LinAlgError('SVD did not converge in Linear Least Squares') resids = array([], result_real_t) if is_1d: x = array(ravel(bstar)[:n], dtype=result_t, copy=True) if results['rank'] == n and m > n: if isComplexType(t): resids = array([sum(abs(ravel(bstar)[n:])**2)], dtype=result_real_t) else: resids = array([sum((ravel(bstar)[n:])**2)], dtype=result_real_t) else: x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True) if results['rank'] == n and m > n: if isComplexType(t): resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype( result_real_t, copy=False) else: resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype( result_real_t, copy=False) st = s[:min(n, m)].astype(result_real_t, copy=True) return wrap(x), wrap(resids), results['rank'], st def _multi_svd_norm(x, row_axis, col_axis, op): """Compute a function of the singular values of the 2-D matrices in `x`. This is a private utility function used by numpy.linalg.norm(). Parameters ---------- x : ndarray row_axis, col_axis : int The axes of `x` that hold the 2-D matrices. op : callable This should be either numpy.amin or numpy.amax or numpy.sum. Returns ------- result : float or ndarray If `x` is 2-D, the return values is a float. Otherwise, it is an array with ``x.ndim - 2`` dimensions. The return values are either the minimum or maximum or sum of the singular values of the matrices, depending on whether `op` is `numpy.amin` or `numpy.amax` or `numpy.sum`. """ if row_axis > col_axis: row_axis -= 1 y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1) result = op(svd(y, compute_uv=0), axis=-1) return result def norm(x, ord=None, axis=None, keepdims=False): """ Matrix or vector norm. This function is able to return one of eight different matrix norms, or one of an infinite number of vector norms (described below), depending on the value of the ``ord`` parameter. Parameters ---------- x : array_like Input array. If `axis` is None, `x` must be 1-D or 2-D. ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional Order of the norm (see table under ``Notes``). inf means numpy's `inf` object. axis : {int, 2-tuple of ints, None}, optional If `axis` is an integer, it specifies the axis of `x` along which to compute the vector norms. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. If `axis` is None then either a vector norm (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned. 
keepdims : bool, optional If this is set to True, the axes which are normed over are left in the result as dimensions with size one. With this option the result will broadcast correctly against the original `x`. .. versionadded:: 1.10.0 Returns ------- n : float or ndarray Norm of the matrix or vector(s). Notes ----- For values of ``ord <= 0``, the result is, strictly speaking, not a mathematical 'norm', but it may still be useful for various numerical purposes. The following norms can be calculated: ===== ============================ ========================== ord norm for matrices norm for vectors ===== ============================ ========================== None Frobenius norm 2-norm 'fro' Frobenius norm -- 'nuc' nuclear norm -- inf max(sum(abs(x), axis=1)) max(abs(x)) -inf min(sum(abs(x), axis=1)) min(abs(x)) 0 -- sum(x != 0) 1 max(sum(abs(x), axis=0)) as below -1 min(sum(abs(x), axis=0)) as below 2 2-norm (largest sing. value) as below -2 smallest singular value as below other -- sum(abs(x)**ord)**(1./ord) ===== ============================ ========================== The Frobenius norm is given by [1]_: :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` The nuclear norm is the sum of the singular values. References ---------- .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 Examples -------- >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) >>> b = a.reshape((3, 3)) >>> b array([[-4, -3, -2], [-1, 0, 1], [ 2, 3, 4]]) >>> LA.norm(a) 7.745966692414834 >>> LA.norm(b) 7.745966692414834 >>> LA.norm(b, 'fro') 7.745966692414834 >>> LA.norm(a, np.inf) 4.0 >>> LA.norm(b, np.inf) 9.0 >>> LA.norm(a, -np.inf) 0.0 >>> LA.norm(b, -np.inf) 2.0 >>> LA.norm(a, 1) 20.0 >>> LA.norm(b, 1) 7.0 >>> LA.norm(a, -1) -4.6566128774142013e-010 >>> LA.norm(b, -1) 6.0 >>> LA.norm(a, 2) 7.745966692414834 >>> LA.norm(b, 2) 7.3484692283495345 >>> LA.norm(a, -2) nan >>> LA.norm(b, -2) 1.8570331885190563e-016 >>> LA.norm(a, 3) 5.8480354764257312 >>> LA.norm(a, -3) nan Using the `axis` argument to compute vector norms: >>> c = np.array([[ 1, 2, 3], ... [-1, 1, 4]]) >>> LA.norm(c, axis=0) array([ 1.41421356, 2.23606798, 5. ]) >>> LA.norm(c, axis=1) array([ 3.74165739, 4.24264069]) >>> LA.norm(c, ord=1, axis=1) array([ 6., 6.]) Using the `axis` argument to compute matrix norms: >>> m = np.arange(8).reshape(2,2,2) >>> LA.norm(m, axis=(1,2)) array([ 3.74165739, 11.22497216]) >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) (3.7416573867739413, 11.224972160321824) """ x = asarray(x) if not issubclass(x.dtype.type, (inexact, object_)): x = x.astype(float) # Immediately handle some default, simple, fast, and common cases. if axis is None: ndim = x.ndim if ((ord is None) or (ord in ('f', 'fro') and ndim == 2) or (ord == 2 and ndim == 1)): x = x.ravel(order='K') if isComplexType(x.dtype.type): sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) else: sqnorm = dot(x, x) ret = sqrt(sqnorm) if keepdims: ret = ret.reshape(ndim*[1]) return ret # Normalize the `axis` argument to a tuple. 
nd = x.ndim if axis is None: axis = tuple(range(nd)) elif not isinstance(axis, tuple): try: axis = int(axis) except: raise TypeError("'axis' must be None, an integer or a tuple of integers") axis = (axis,) if len(axis) == 1: if ord == Inf: return abs(x).max(axis=axis, keepdims=keepdims) elif ord == -Inf: return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: # Zero norm return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims) elif ord == 1: # special case for speedup return add.reduce(abs(x), axis=axis, keepdims=keepdims) elif ord is None or ord == 2: # special case for speedup s = (x.conj() * x).real return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) else: try: ord + 1 except TypeError: raise ValueError("Invalid norm order for vectors.") if x.dtype.type is longdouble: # Convert to a float type, so integer arrays give # float results. Don't apply asfarray to longdouble arrays, # because it will downcast to float64. absx = abs(x) else: absx = x if isComplexType(x.dtype.type) else asfarray(x) if absx.dtype is x.dtype: absx = abs(absx) else: # if the type changed, we can safely overwrite absx abs(absx, out=absx) absx **= ord return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord) elif len(axis) == 2: row_axis, col_axis = axis if row_axis < 0: row_axis += nd if col_axis < 0: col_axis += nd if not (0 <= row_axis < nd and 0 <= col_axis < nd): raise ValueError('Invalid axis %r for an array with shape %r' % (axis, x.shape)) if row_axis == col_axis: raise ValueError('Duplicate axes given.') if ord == 2: ret = _multi_svd_norm(x, row_axis, col_axis, amax) elif ord == -2: ret = _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) elif ord == Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) elif ord == -1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) elif ord == -Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) elif ord in [None, 'fro', 'f']: ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) elif ord == 'nuc': ret = _multi_svd_norm(x, row_axis, col_axis, sum) else: raise ValueError("Invalid norm order for matrices.") if keepdims: ret_shape = list(x.shape) ret_shape[axis[0]] = 1 ret_shape[axis[1]] = 1 ret = ret.reshape(ret_shape) return ret else: raise ValueError("Improper number of dimensions to norm.") # multi_dot def multi_dot(arrays): """ Compute the dot product of two or more arrays in a single function call, while automatically selecting the fastest evaluation order. `multi_dot` chains `numpy.dot` and uses optimal parenthesization of the matrices [1]_ [2]_. Depending on the shapes of the matrices, this can speed up the multiplication a lot. If the first argument is 1-D it is treated as a row vector. If the last argument is 1-D it is treated as a column vector. The other arguments must be 2-D. Think of `multi_dot` as:: def multi_dot(arrays): return functools.reduce(np.dot, arrays) Parameters ---------- arrays : sequence of array_like If the first argument is 1-D it is treated as row vector. If the last argument is 1-D it is treated as column vector. The other arguments must be 2-D. Returns ------- output : ndarray Returns the dot product of the supplied arrays. See Also -------- dot : dot multiplication with two arguments. References ---------- .. 
[1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication Examples -------- `multi_dot` allows you to write:: >>> from numpy.linalg import multi_dot >>> # Prepare some data >>> A = np.random.random(10000, 100) >>> B = np.random.random(100, 1000) >>> C = np.random.random(1000, 5) >>> D = np.random.random(5, 333) >>> # the actual dot multiplication >>> multi_dot([A, B, C, D]) instead of:: >>> np.dot(np.dot(np.dot(A, B), C), D) >>> # or >>> A.dot(B).dot(C).dot(D) Example: multiplication costs of different parenthesizations ------------------------------------------------------------ The cost for a matrix multiplication can be calculated with the following function:: def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1] Let's assume we have three matrices :math:`A_{10x100}, B_{100x5}, C_{5x50}$`. The costs for the two different parenthesizations are as follows:: cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 """ n = len(arrays) # optimization only makes sense for len(arrays) > 2 if n < 2: raise ValueError("Expecting at least two arrays.") elif n == 2: return dot(arrays[0], arrays[1]) arrays = [asanyarray(a) for a in arrays] # save original ndim to reshape the result array into the proper form later ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim # Explicitly convert vectors to 2D arrays to keep the logic of the internal # _multi_dot_* functions as simple as possible. if arrays[0].ndim == 1: arrays[0] = atleast_2d(arrays[0]) if arrays[-1].ndim == 1: arrays[-1] = atleast_2d(arrays[-1]).T _assertRank2(*arrays) # _multi_dot_three is much faster than _multi_dot_matrix_chain_order if n == 3: result = _multi_dot_three(arrays[0], arrays[1], arrays[2]) else: order = _multi_dot_matrix_chain_order(arrays) result = _multi_dot(arrays, order, 0, n - 1) # return proper shape if ndim_first == 1 and ndim_last == 1: return result[0, 0] # scalar elif ndim_first == 1 or ndim_last == 1: return result.ravel() # 1-D else: return result def _multi_dot_three(A, B, C): """ Find the best order for three arrays and do the multiplication. For three arguments `_multi_dot_three` is approximately 15 times faster than `_multi_dot_matrix_chain_order` """ a0, a1b0 = A.shape b1c0, c1 = C.shape # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1 cost1 = a0 * b1c0 * (a1b0 + c1) # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1 cost2 = a1b0 * c1 * (a0 + b1c0) if cost1 < cost2: return dot(dot(A, B), C) else: return dot(A, dot(B, C)) def _multi_dot_matrix_chain_order(arrays, return_costs=False): """ Return a np.array that encodes the optimal order of mutiplications. The optimal order array is then used by `_multi_dot()` to do the multiplication. Also return the cost matrix if `return_costs` is `True` The implementation CLOSELY follows Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. 
cost[i, j] = min([ cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) for k in range(i, j)]) """ n = len(arrays) # p stores the dimensions of the matrices # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] # m is a matrix of costs of the subproblems # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} m = zeros((n, n), dtype=double) # s is the actual ordering # s[i, j] is the value of k at which we split the product A_i..A_j s = empty((n, n), dtype=intp) for l in range(1, n): for i in range(n - l): j = i + l m[i, j] = Inf for k in range(i, j): q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] if q < m[i, j]: m[i, j] = q s[i, j] = k # Note that Cormen uses 1-based index return (s, m) if return_costs else s def _multi_dot(arrays, order, i, j): """Actually do the multiplication with the given order.""" if i == j: return arrays[i] else: return dot(_multi_dot(arrays, order, i, order[i, j]), _multi_dot(arrays, order, order[i, j] + 1, j))
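# Illustrative sketch of the cost comparison described in the docstrings
# above, using the A (10x100), B (100x5), C (5x50) example shapes:
#
#   >>> import numpy as np
#   >>> A = np.ones((10, 100)); B = np.ones((100, 5)); C = np.ones((5, 50))
#   >>> 10 * 5 * (100 + 50)     # cost((AB)C), as computed in _multi_dot_three
#   7500
#   >>> 100 * 50 * (10 + 5)     # cost(A(BC))
#   75000
#   >>> np.allclose(np.linalg.multi_dot([A, B, C]), A.dot(B).dot(C))
#   True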
mit
ray-project/ray
python/ray/tune/utils/visual_utils.py
4
2077
import pandas as pd
from pandas.api.types import is_string_dtype, is_numeric_dtype
import logging
import os
import os.path as osp
import numpy as np
import json

from ray.tune.utils import flatten_dict

logger = logging.getLogger(__name__)

logger.warning("This module will be deprecated in a future version of Tune.")


def _parse_results(res_path):
    res_dict = {}
    try:
        with open(res_path) as f:
            # Get last line in file
            for line in f:
                pass
        res_dict = flatten_dict(json.loads(line.strip()))
    except Exception:
        logger.exception("Importing %s failed...Perhaps empty?" % res_path)
    return res_dict


def _parse_configs(cfg_path):
    try:
        with open(cfg_path) as f:
            cfg_dict = flatten_dict(json.load(f))
    except Exception:
        logger.exception("Config parsing failed.")
    return cfg_dict


def _resolve(directory, result_fname):
    try:
        resultp = osp.join(directory, result_fname)
        res_dict = _parse_results(resultp)
        cfgp = osp.join(directory, "params.json")
        cfg_dict = _parse_configs(cfgp)
        cfg_dict.update(res_dict)
        return cfg_dict
    except Exception:
        return None


def load_results_to_df(directory, result_name="result.json"):
    exp_directories = [
        dirpath for dirpath, dirs, files in os.walk(directory) for f in files
        if f == result_name
    ]
    data = [_resolve(d, result_name) for d in exp_directories]
    data = [d for d in data if d]
    return pd.DataFrame(data)


def generate_plotly_dim_dict(df, field):
    dim_dict = {}
    dim_dict["label"] = field
    column = df[field]
    if is_numeric_dtype(column):
        dim_dict["values"] = column
    elif is_string_dtype(column):
        texts = column.unique()
        dim_dict["values"] = [
            np.argwhere(texts == x).flatten()[0] for x in column
        ]
        dim_dict["tickvals"] = list(range(len(texts)))
        dim_dict["ticktext"] = texts
    else:
        raise Exception("Unidentifiable Type")
    return dim_dict
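# Example usage sketch (the experiment directory and column names below are
# hypothetical; any Tune log directory containing result.json / params.json
# pairs per trial would work):
#
#   df = load_results_to_df("/tmp/ray_results/my_experiment")
#   # Build parallel-coordinates dimensions for a hyperparameter column and
#   # a metric column:
#   dims = [generate_plotly_dim_dict(df, f)
#           for f in ("config/lr", "episode_reward_mean")]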
apache-2.0
namiszh/fba
WebApp/app/compute.py
1
12890
#!/usr/bin/env python import pandas as pd from pandas import DataFrame from scipy.stats import rankdata import re import matplotlib.pyplot as plt import os import numpy as np from io import BytesIO import base64 # web application directory WEB_APP_ROOT = os.path.abspath(os.path.dirname( __file__ )) # print(WEB_APP_ROOT) DATA_ROOT = os.path.abspath(os.path.join(os.path.dirname( WEB_APP_ROOT ), 'data')) # print(DATA_ROOT) def data_to_ranking_score(values, reverse = False): ''' Given a list of value, return a list of ranking score. If reverse = False, the biggest value will get the biggest ranking score. If reverse = True, the biggest value will get the smallest ranking score. Only category 'TO' should set 'reverse' to 'True'. The smallest ranking score is 1, the biggest ranking score is the element number of this list. ''' scores = rankdata(values) if reverse: scores = [(len(values) + 1 - score) for score in scores] return scores; def parse_data_file(data_file_name): '''The method reads a league's raw data of for week or total season from a csv file, then convert these data to scores and output to another csv file. ''' df = pd.read_csv(data_file_name, encoding = "ISO-8859-1") headers = list(df) for header in headers[1:]: reverse = (header == 'TO') df[header] = data_to_ranking_score(df[header], reverse) # add a column 'Total', its value df['Total'] = df[headers[1:]].sum(axis=1) # replace 'data' in file name with score score_file_name = re.sub(r'(_week\d+_)data(\.csv$)', r'\1score\2', data_file_name) print('write result to', score_file_name) df.to_csv(score_file_name, index=False) def get_week_csv_file(league_name, week, file_type='score'): file_name = '{}_week{}_{}.csv'.format(league_name, week, file_type) # print('week {} score file name of league "{}": "{}"'.format(week, league_name, file_name)) file_path = os.path.abspath(os.path.join(DATA_ROOT, file_name)) print('week {} score file path of league "{}": "{}"'.format(week, league_name, file_path)) return file_path def get_week_score_png(names, scores, title): pos = list(range(1, len(names)+1)) print(pos) width = 0.3 # Plotting the bars fig, ax = plt.subplots(figsize=(20,12)) # Create a bar with week score, # in position pos, plt.bar([p + width for p in pos], scores, # of width width, # with alpha 0.5 alpha=0.5, # with color color='#EE3224', edgecolor='red', # with label the first value in first_name label='Week') # Set the y axis label ax.set_ylabel('Score') # Set the chart's title ax.set_title(title) # Set the position of the x ticks ax.set_xticks([p + 1.5 * width for p in pos]) # Set the labels for the x ticks ax.set_xticklabels(names, rotation=60) # Setting the x-axis and y-axis limits plt.xlim(min(pos)-width, max(pos)+width*4) plt.ylim(0, 180 ) # Adding the legend and showing the plot # plt.legend(['Week', 'Season'], loc='upper right') plt.grid(True) figfile = BytesIO() plt.savefig(figfile, format='png') figfile.seek(0) # rewind to beginning of file figdata_png = base64.b64encode(figfile.getvalue()) figdata_png = figdata_png.decode('utf8') return figdata_png def compute_png_svg(): """Return filename of plot of the damped_vibration function.""" t = np.arange(0.0, 2.0, 0.01) s = 1 + np.sin(2*np.pi*t) plt.plot(t, s) plt.xlabel('time (s)') plt.ylabel('voltage (mV)') plt.title('About as simple as it gets, folks') plt.grid(True) # Make Matplotlib write to BytesIO file object and grab # return the object's string figfile = BytesIO() plt.savefig(figfile, format='png') figfile.seek(0) # rewind to beginning of file figdata_png = 
base64.b64encode(figfile.getvalue()) figdata_png = figdata_png.decode('utf8') # figdata_png="iVBORw0KGgoAAAANSUhEUgAAASwAAACWCAIAAADrOSKFAAAYCklEQVR42u2de4xj91XHz/n97sv2jO2Z3dn1zGQn2byTbbLZNk2TlhZEU5S+CLRIVCog/qGAhIAWFJAKFFrUClFEhcRDKi0IEKogIBFahAqt1FcepGmazeadTXZnNzuzuzM7fo3t+/od/rger8f22HfGP/vO7pyP9o9Zj+/19R1/fX6/c87v+8MvnSZgGCY5RNIXwDB7HRYhwyQMi5BhEoZFyDAJwyJkmIRhETJMwrAIGSZhWIQMkzAsQoZJGBYhwyQMi5BhEoZFyDAJYyR9AXsLIkAExFjP3NbzAYAAgJqHAEK8gzYfxSQBi3CsIIJbqzYq5UFLV8hKpVOTeUSsV0purQoxNIWIqWzOctKkVK1c9N16nKOEEOn8tGFaSd+bvQuLcKwICcf/5z++/jd/QkR99KHC8K4HPvy+3/gDaZjf+8oXH/2XL0sp+5+ZCAzL+sAnPn3n/e9v1Btf+8KnXnr0G0IMPIrSuamPfOav5299E/GatoRgEY4bt7ZeXH6DSPUJUyoMaqVLQAAI9XKpuHRWGIP+UkSm7Xj1OgAQ0fraanHpDRwkXSDlu43Q9wCjgSmTACzCBEAUAIBbT8JIieg5AAAIKFCIzSk06opbiIBtp0TsPop6BDuBKHg6mCwswoQhIlJhhzpUGCgVAgACWE4aAImopbDmz0Sk1KYzKRXJTAhhpdJE0H4UECEibT6KSJEKgUeiicIiTBIislLpIz/6Xntisl0JpNR1x+5FIQDgrgc+fPLJ77729KMAol2HC3fcffu7fgKFiIaRBCClnLvlDlJgOql7f+YX33jhmQunXmkdRQCIeMt9P379W97eei0Cspx0rjDPMkwQFmGiEDkT2ff86u/sv+Ywkdr0KxSISASFG27+yYc++/Cnf/Ps8z8E0dIhlS8uFW687Y773wuAremcUqAUIMINd7/jg5/4zL9/7qG1pUXE5syQlKqsnL/pbe+67q43t7+aCoEUj0kTQz748T9M+hr2ECjg9PGnXnr0mwDNIaWdmbznp34uMzUVzeEu/0OMRAgAuQOFA4dvWTzx/eraCkD0G6xXimee/cHMwo37D11PCpRqZlYQgQgQYObaG7IzB19/+gm3Vo0OAYDyyvLSy89dc9uxyf0HSQGpZlBkBSYId8zsDrYYDUaKIoLr33LfB3/rj6fnrm0FTESxevbUI3/2+6/94LGooN8aUm78jEff8+ADv/bJdDavlIKmfMXiiace+fwnL7z+alTTZxKHRbjbaSqK4NZ3vPsDH/+j7P7CZUUJcf7ki4/86SfPPv9MM8e5WYco5N0f/Mj9H/ttJz1BbTo8+eR3H/n87106d6ZdukxS8HB0rPQcjr71wY+mc/l+R0WKQjxw+OZMfvrUD5/w6rXmCBOxdHHpwusvX3vHWyem93UfJaScv/UOIjr97PdV6LfGpSuLr5UvLh8+dp+TyXDPWrKwCMdKtwidiezbPvTRzFQeEVBc/hdVz4lAtB5EEBJnbzpipydPH3/SdxsbisK1pTOrb5w6fNfb0tlcq920dZRhGoeOHPMbjbMvPKPCoKXDC6+9XK8UDx+710qlWIcJwtnRREF0a9XHH/6HTH5feyWdSM3eePtN9/2YEHLp1RdXz7wGG7V7RJyY3j9/652vPvmdqAwYKeql733jq3/+qQcf+lz2QEEFwdkXjldWzreEhYgHb7h1/8L1S688Dxu5HSL11H9+xclMvudXftfJTCR9L/YuLMIkQcRGtfLNL3+h43EVBvf89M/feM+7QMqn/+vhb//TX8lNbWsYBbSWbqOfj//vI3O33PHuX/pE4Hnf/se/fP5b/y3a29YQQ//ycDQ6KvT9xx/++0NH3nzsfR/qKJEwY4NFuDu5PDYMfderVYVh9noStv8cBn5l5TwpIiDfbbi1qpBGn0Oi//tuo7p2EZFbRxODRZgI1LOPs+33m35LsZ7VbIrpd0CPQzZWHzLJwSIcN9K07Mxk/8qACgPTdqLcjGHZzkRWysF/KRUGpm0DACJaTsqZyA5cygQAgCh5MWGiIG+NNmbWls5ceP3l/s8hovzB+YPX34JCXDj1yqU3TmOM3CURTc0tHLjuJlLq3MsnqpcuxjkKAA4cvnlq9lDSN2bvwiIcK0QQd+UQNXtltrXSKCrrA2xvjEnEvaNJwsPRsYIb1b8RPb9FS43beCEmIbhtjWEShkXIMAnDImSYhOE54Q4hAoGAYq/6IyGQAsUdpzpgEe4QgXBqae3UG2sxywBXGUR03fzUdbNTe/MrSC8swh2CCKfOrX3tOy8mfSGJ8f533Xp4boqXIw4Pzwl3TtqxpNyjN1BKkXa4z0YPHAl3CAGkHVMKEYZqr41IiUgKkXZMjoJa2KNf5FpI2aaUe0t+LaTElG0Ofx4GWITDkHZMQ+zRG2gIkXZYhHrg4ehOIXBswzAEJbQQiMabEmkfchOAYQjHNvZoeUY3LMIdQgCGFCnbBKqPX4VEJKRITWVw9KE4cH23XN/sqA8p2zSkYA1qgUW4c6QUqeSGZELKqUMzctThCGF9peJWGh1d5GnH3LOZYe2wCHeObM6LEhqQIoAAxNhb8u70VShUXes4KOWYcq/Oh7XD93HnSImpZNP0Y3htgjAINznBtcozezUzrB0W4c4xhEg71lWenCBSQQhtiZloh6eUY+3ZzLB2+D7uHBSQsg0UOOZE5TghgkiEbY8QCkzZBvJnRxN8I4eAmk0zSV/HKN8ikQo6DUmbk+Gr9ptn3FzNH6BRQwBpxzKu6qkRbQxH2zEkph2LNagLFuFQXP1JQkVhr0iYYG3m6oNLFEMRv1xGRGpHlk29zwYEikICUNoCEiKILotupRSFnZFQSu5Z0wmLcAjaGkf6D0mjdpN3HM7eXkjr0SGRNI39105J09BSqUDAkyXv8fO1UFG7DlWgOtJOba1Co7inexEW4c653EJZHlyuV0Q/cn32l98+F2iKXUKIXDa7eaOYofi3k6XHl2udlx2qrkL95aZZRgtX9Xxm9Egh4q3oQaWgVO/+RO8UItJdGCl5yu86pQrC7hHvXl4+Mgr4Vg6FITHO7Cjywy41Aj/UI5xRRKGyGxJ12gCrze0yEXt5IeUoYBEORZQnpHiiKNW1iVA7gaKSp6BrWN0tQgK66qujY4Zv5VDEzxMiQLEe+OEu3YnTV1T2VHd0iyr1HSlTXkKhF76VQyEQ0o6FEKtzrdwIfUW7cxjnKyp7YUccJEVhd88aYMqxxO58G1cmLMJhSdlGnAkSIpQbgRfs0uGor6jkhR1vo2e7TLR2JOnrvapgEQ5F1LkWc4LUCFS1K9rsEryQ1v3OofKWjaM2+6zphEU4LKnYEyQ/pFI90HLHETWv5q34yusuYPaOhCKd4kioEy7WD0vaMWWMGRIC+CFdqPo1X4U66vVCCDMgI17fGgLYEs
XW/qhlT/mqU9WkSIXdkZDNDjXDIhwOgpRjGvEioRuov31s6asnVjVIkEBa8sANc3Ha1ohgypG/fue+vL3lFvZlL/RVj0o9dT1oRM46PB7VB4twKAjAMqRtGQPbRxExVHTi3PqzsK7hdQkM2yhYWSOG0ZMCuClnhX3zt2VPBT1E2KNx1LYMy5CsQY2wCIelmS2M4fYUTeS0QERSoESQcaaGRFO2NPuOmUtej/JJGISd6z6I3WX0w4mZYYmyhUlfRV8IspboL8KypxR1FuUpVD161rhdRjd8N4clch+N2bmWCASQtaSxtQjDqFLf/XjXnLDZs8btMlrhuzksUsTq4U6W/pFw6561EKi7Z82Kkw1m4sMiHBYpMH7n2vghIgTIWrLPc/yN7u2OA1WwqWrR7FmzY5VkmPiwCDWQcgyxiz+XhsCs1e8P7SsoeSF2NY52V+qFwJTDyTzNsAg1sMunSabA3AARUqUrEgJ1dm8Db9A7GvhbbVgIIGVbUmCg64REMLgAD9HKeiLonxMiAEMMGI5WfeV1LXTs2ThqCOQNerXDItRA2jENKVwdp4osoTKWsI0BoVVaxj7HkH3VFZ0wZ8s+vTKwRbtMz+GolDHtPJhtwCIcGtJcOhMIv3BP4b23TffrcSGQUkzN7JNSDqyOGIjzE/3+0GVPdVfqVai6e9bYe3sUsAiHhQBsU1qm1LVDWqAoY4nbC+k+fd4EIIXIZi0h5XbO3ZuSF/bqWetslyEAa+OdMhphEWpAtnqah1chogqpWA8C1a/kQUSI2gJSFAk7HuxuHG3G/F2cgrpC4RuqAY2O1AgACOV6GIzREqrshSFBx1dIT5+1XZ4HvkLhG6oB2cwZapONRnPEgShq1ic21QmxxzqmqGfN2MUV0SsUFqEGDK2b10e+bN64fNmidplOYRF07wMD27ERYOLDN1QDQmC0kGL4zrWoy6xYD/xwTL5svoLu7m3q2qA3emtpx9zNvUFXKCzCYYnSMSnHFFoWCyICQqUReuMajjZtf7uM1noZW4i0bbIEtcMiHJboQ5mObXIR52yNQFXdYDy+bL6iSrfZYc9KvcAU96yNAC5RaIAAUo4lpQg0TeQCRZVGaCCGWyV7EIUmt7VaoOo9etagt88a96yNABahHjRuz4AAbqAeO1WuB0ptLWppiOmDphxk92IKvH3KzphbXls9oO7WnK0cR9n2dxSwCPWgd6MiN6AvPrbUvxggLaNwu2/YZp9sEAHsd4y/eOdsHxGuNgKvKwlEoer+ApDRFlQcCnXDItQAETiWYRp6OtcQkYga/qC1EYRVXxlC9dmDWxFM2+QY/S6q6PZolwm7tiVs95Vj9MIi1INs7SCtJ0U66CxEkXUbQueOgh3Py1lxfNY6H+zRLrMdh1VmW7AI9RBzt9CIgZvsYhwdxnolyNkDfNYqngoUdQylVa/WVTa2GBEsQj00e7hjhEIisg0xacs+Mqt6Yd0Ltegwa8k+KxOJmj5r2GOD3s7ncuPoiGAR6iFaaBdnvqQI7pqfeOj+Q2lT9oyIQuDfPbH8z09dkP1HmjGgwT5r0G3xBFusY+INekcEi1APrV37oqXx/Z+siBam7JzTO8lhCFyYsgVCnLjah0jhOUv2aeXpaXbYqk909KylHFNK3JWeclc2LEI9IEDajtW5FrWGugHRFssulKKcY5gSh1/NNNBnLei1QW/PHdEERs6OXKHQD48utBF3yoRQcUPX37IMTwA5xzB1DPwMAYPMDntGQgjDXu4yXKkfDSxCPTQ71+Ipxwup7IZ9omYuJU2Jw8ccU2CurxNUPaBawBv0JgyLUBspJ97m9QBBSKV6P4fEXMowh+6/IQBTYH+zw54+a6B4q/qxwiLURvzN6/1QlRpbipAIJm050PIwDgYOmBOWevqsKUVht+OoNgsPpgNOzGiCIGUbhhRxEppeSKV60OdptiEylgwVgdhJ4T5KZioCS2Kmr5jLXo8F9N3tMgRgGMKJsSEpswNYhHqgDZOLtXJ9oAoDRaVGCFukGgkgbckHbpsSApZKXrkRBIpaC5e2EmTLt5sIBMKEKQ+m5TvnMhPm4EjY8aAKVGcdgiBlm9FXDKMdFqE2pIjnTo2oQirVgz7LlFKm+NjbZ3/22IHXVuvHz60fP7f+4vnaxaq/7oVA1L45L0VNcASAkDbEPkfenLePTNtHpu3DWSs/aIPeshdGCm+n51b17C4zOliE2ohpfBiZGpbqga+oz2IlU4qZCXFg0rz3umzNUyvr/osXasfPrT97rnpypbFW8wNFIYEjIGcZh7PmkWnnyLR9c97e70hbxtqYm4jKnqKu6Npjl+yoZ43bZUYDi1AbbcaHAwSAAKVG6IfKEP1SlwSgFCCCY4pDefvaKfv+m6cqbrBU8k4srb+w4s5cl3/TgfRtU3YhbWRMsV2Tm0BBuXfPmuqeE6Z4q/qRwSLUxrbK2aV64IeUHrREtiUrAggUIeKkbeQOGrcdTIeAuVzeNHZug+8TlbvcZaDLZy16+bTNkXBU8G3VhkBIO2bMLXuL27f3balCEQSKBOKQ64r8Xj5r3e4yRIQCU47Jy5hGBItQJ+l4YzaMTA2DMdn7bkXUOLpV93Y7kouEo4RFqA1qOs3EuqVuoNY9FcWWpNYluCGt9+5Z62yXMSSmHYvrEyOCRaiTmBsVIkAjUM+frxXrIREZoq3kMJZPuiJa99VLa1496PL5VhSyz9p44cSMTtKOZcRLIdY89dmvL/7r0xePzmeOzk3cWkjPZMy0JQCBNO4s0wYReYpWG+HLRe/EauPEpcarRa/atZiDFHX3rGncdorphkWoD4KUbcQZjkZ+amt1/4nT/hOnyxlLzExYtxXSR+cmjs5nDk87uZRhSVQbgZFo50vsQ0UVX52u+CcuNU6sNl5ccy/Ww/VAAUWO+13GFmGvnrWWjRUzAliE2iAA05COZRapMVAzzWo6AhHVfXV6rfH6pcbXX1zLOvJQ3r5zbuLofOZNs5lC1pqwpBCXw2Oc3lQiqod0oRa8sOY+u9p47pJ7puqXPdVqfxMAW6VWw14+a45tGAb3rI0KFqFOpBQpZ3u3dEOMIBCIqFgP1mrBM+fWH/6hmM4YN8+kj85njs5P3DSTmk4bjiFo63mjr6johidL3olLjROr7stFd80NGwFBFPEA4mwtqIIeRqYpLhKOEhahTqTAtL3zLVPaw6MXqqWyd67kfetkcdKWhax1x2zmzrmJO+cy10zZWVtKBMRmiuWN9eC5S40Tq+5zl9zlmr/uK7Ux2hS4vUUYPRtH044Zc67L7AAWoU42tkwZ1gO4FR4jQVbd8JUL9Zcu1B85sZpPGYf3OUfnJo4tZPfPGS+U/GdXG6fKftELvfDyYosdG4T2WsdEMbO+zM5gEepE75a9EU1BIiBBqGhl3b9Y9f/vdCX77GrhSOgJQRtBL17b9gA6fNYi2HF0pLAIddLeuabHQruNDTFeTufUA2XaUuM2hqQo7O5ZA0w7lkBQnJkZDfz1ppmUMyaveB1hrxOCXj1rElM2f1mPEBahTuhKHrmRUkHDDz2/43EpBPesj
RT+htNM2jalRD8Y/kzjIOoUDRq+W224lbq37gZulwjZcXTEsAg1E999NEFUqEIv8NYbbqXhVuuB67cv5O0Y50aLlZO+5KsZFqFWCNJOrM618VxMe84mSrr4NdetNrxqw6u5oR+0lwS3mmM2U748Hh0ZLEKdEIBpGrZpaNosdDgQAEAFYeD6brURaS9wfbW5P7t/focAbMuwDMkaHB0sQs0YAh3bUBuWoaBru8/toJRSXujVXLdadysNv+51lOAHXlLLQFEpStnsLjNaWISasSzjvqML2Yy9tFKprLueHxJR9JkfqRqjFItf971q3a02vHU39IP2oBdDeBAZoUZqtS05mbHnZrJHbjhom/w5GSF8c3VCBIYUd98+f+dNs+X1xrkL5cWl4uL54sW19XrDD0LVKu7pESRilGJxqw23WveqjaDhq1DtJOgREJAhRdqxDkxnFgr5hdn87Ew2m7EtQxDgMMupmP7gl07zaF8nrQ8rAiBCqKju+qvF2pnl0uJy8ez5UqnaaHgBtH2mdyBIIhKGnJjJBg0/foql/fD2q3UsIz+ZuqaQWyjkDxVy09m0YxtCILTWT7ECRwmLcIRcFiQCAPmBqta85ZXK4nJxcam4vFpdr3tBELZUs7M9J1rED3oAZBpyIm0X9k8uFHILs1OFfROZlGUYAnSsJGa2BYtw3CACETXcsFipnz1fWlwunlkuXSrX6g2fiCD2eDVSVPwUCxFFRv378ulDhdxCIX/NwVxuwrEticibYCcJi3DctIdHBAhCVWv4Fy6tLy4VF5eL5y6Wo3QOAOw4ndM+2gQA25LZjDN/IHuokF+Yzc9MZVKOYQhBPNrcHbAIdwXReNX1VFs6p7SyVq1tJ53TnmIxpUynzAPTE4cK+YVCfm5mcjJjW6YA4KC362AR7gp6pXOC1eJ6ezrH9QLqSue0BT1CRMc285POoYO5hdn8oUJ+KptK2QYip1h2NSzCXcfmdA74garW3OWVyuJyaXGpuLxaWa97fhC29kczTTmRsmZnJhcK+YVC/uC+iUzKMtvcaFh4uxwW4ZVBM53jhcVK/exGeBQCDx3ML8zmrzmQzU44jiWBUyxXICzCK4PudE7dDQCAUyxXAdwxc2VweY80AgIQQmRSTVu3dtcJVuCVyO5YdMMwexgWIcMkDIuQYRKGRcgwCcMiZJiEYREyTMKwCBkmYViEDJMwLEKGSRgWIcMkDIuQYRKGRcgwCcMiZJiE+X/1Cq+Z+KX4xAAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAAASUVORK5CYII=" # svg # figfile = BytesIO() # plt.savefig(figfile, format='svg') # figfile.seek(0) # figdata_svg = '<svg' + figfile.getvalue().split('<svg')[1] # figdata_svg = base64.b64encode(figfile.getvalue()) return figdata_png if __name__ == '__main__': for i in range(0,3): data_file_name = get_week_csv_file("Never Ending", i, "data") # print(data_file_name) parse_data_file(data_file_name)
mit
numenta/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/colors.py
69
31676
""" A module for converting numbers or color arguments to *RGB* or *RGBA* *RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the range 0-1. This module includes functions and classes for color specification conversions, and for mapping numbers to colors in a 1-D array of colors called a colormap. Colormapping typically involves two steps: a data array is first mapped onto the range 0-1 using an instance of :class:`Normalize` or of a subclass; then this number in the 0-1 range is mapped to a color using an instance of a subclass of :class:`Colormap`. Two are provided here: :class:`LinearSegmentedColormap`, which is used to generate all the built-in colormap instances, but is also useful for making custom colormaps, and :class:`ListedColormap`, which is used for generating a custom colormap from a list of color specifications. The module also provides a single instance, *colorConverter*, of the :class:`ColorConverter` class providing methods for converting single color specifications or sequences of them to *RGB* or *RGBA*. Commands which take color arguments can use several formats to specify the colors. For the basic builtin colors, you can use a single letter - b : blue - g : green - r : red - c : cyan - m : magenta - y : yellow - k : black - w : white Gray shades can be given as a string encoding a float in the 0-1 range, e.g.:: color = '0.75' For a greater range of colors, you have two options. You can specify the color using an html hex string, as in:: color = '#eeefff' or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B* are in the range [0,1]. Finally, legal html names for colors, like 'red', 'burlywood' and 'chartreuse' are supported. """ import re import numpy as np from numpy import ma import matplotlib.cbook as cbook parts = np.__version__.split('.') NP_MAJOR, NP_MINOR = map(int, parts[:2]) # true if clip supports the out kwarg NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2 cnames = { 'aliceblue' : '#F0F8FF', 'antiquewhite' : '#FAEBD7', 'aqua' : '#00FFFF', 'aquamarine' : '#7FFFD4', 'azure' : '#F0FFFF', 'beige' : '#F5F5DC', 'bisque' : '#FFE4C4', 'black' : '#000000', 'blanchedalmond' : '#FFEBCD', 'blue' : '#0000FF', 'blueviolet' : '#8A2BE2', 'brown' : '#A52A2A', 'burlywood' : '#DEB887', 'cadetblue' : '#5F9EA0', 'chartreuse' : '#7FFF00', 'chocolate' : '#D2691E', 'coral' : '#FF7F50', 'cornflowerblue' : '#6495ED', 'cornsilk' : '#FFF8DC', 'crimson' : '#DC143C', 'cyan' : '#00FFFF', 'darkblue' : '#00008B', 'darkcyan' : '#008B8B', 'darkgoldenrod' : '#B8860B', 'darkgray' : '#A9A9A9', 'darkgreen' : '#006400', 'darkkhaki' : '#BDB76B', 'darkmagenta' : '#8B008B', 'darkolivegreen' : '#556B2F', 'darkorange' : '#FF8C00', 'darkorchid' : '#9932CC', 'darkred' : '#8B0000', 'darksalmon' : '#E9967A', 'darkseagreen' : '#8FBC8F', 'darkslateblue' : '#483D8B', 'darkslategray' : '#2F4F4F', 'darkturquoise' : '#00CED1', 'darkviolet' : '#9400D3', 'deeppink' : '#FF1493', 'deepskyblue' : '#00BFFF', 'dimgray' : '#696969', 'dodgerblue' : '#1E90FF', 'firebrick' : '#B22222', 'floralwhite' : '#FFFAF0', 'forestgreen' : '#228B22', 'fuchsia' : '#FF00FF', 'gainsboro' : '#DCDCDC', 'ghostwhite' : '#F8F8FF', 'gold' : '#FFD700', 'goldenrod' : '#DAA520', 'gray' : '#808080', 'green' : '#008000', 'greenyellow' : '#ADFF2F', 'honeydew' : '#F0FFF0', 'hotpink' : '#FF69B4', 'indianred' : '#CD5C5C', 'indigo' : '#4B0082', 'ivory' : '#FFFFF0', 'khaki' : '#F0E68C', 'lavender' : '#E6E6FA', 'lavenderblush' : '#FFF0F5', 'lawngreen' : '#7CFC00', 'lemonchiffon' : '#FFFACD', 'lightblue' : '#ADD8E6', 'lightcoral' : 
'#F08080', 'lightcyan' : '#E0FFFF', 'lightgoldenrodyellow' : '#FAFAD2', 'lightgreen' : '#90EE90', 'lightgrey' : '#D3D3D3', 'lightpink' : '#FFB6C1', 'lightsalmon' : '#FFA07A', 'lightseagreen' : '#20B2AA', 'lightskyblue' : '#87CEFA', 'lightslategray' : '#778899', 'lightsteelblue' : '#B0C4DE', 'lightyellow' : '#FFFFE0', 'lime' : '#00FF00', 'limegreen' : '#32CD32', 'linen' : '#FAF0E6', 'magenta' : '#FF00FF', 'maroon' : '#800000', 'mediumaquamarine' : '#66CDAA', 'mediumblue' : '#0000CD', 'mediumorchid' : '#BA55D3', 'mediumpurple' : '#9370DB', 'mediumseagreen' : '#3CB371', 'mediumslateblue' : '#7B68EE', 'mediumspringgreen' : '#00FA9A', 'mediumturquoise' : '#48D1CC', 'mediumvioletred' : '#C71585', 'midnightblue' : '#191970', 'mintcream' : '#F5FFFA', 'mistyrose' : '#FFE4E1', 'moccasin' : '#FFE4B5', 'navajowhite' : '#FFDEAD', 'navy' : '#000080', 'oldlace' : '#FDF5E6', 'olive' : '#808000', 'olivedrab' : '#6B8E23', 'orange' : '#FFA500', 'orangered' : '#FF4500', 'orchid' : '#DA70D6', 'palegoldenrod' : '#EEE8AA', 'palegreen' : '#98FB98', 'palevioletred' : '#AFEEEE', 'papayawhip' : '#FFEFD5', 'peachpuff' : '#FFDAB9', 'peru' : '#CD853F', 'pink' : '#FFC0CB', 'plum' : '#DDA0DD', 'powderblue' : '#B0E0E6', 'purple' : '#800080', 'red' : '#FF0000', 'rosybrown' : '#BC8F8F', 'royalblue' : '#4169E1', 'saddlebrown' : '#8B4513', 'salmon' : '#FA8072', 'sandybrown' : '#FAA460', 'seagreen' : '#2E8B57', 'seashell' : '#FFF5EE', 'sienna' : '#A0522D', 'silver' : '#C0C0C0', 'skyblue' : '#87CEEB', 'slateblue' : '#6A5ACD', 'slategray' : '#708090', 'snow' : '#FFFAFA', 'springgreen' : '#00FF7F', 'steelblue' : '#4682B4', 'tan' : '#D2B48C', 'teal' : '#008080', 'thistle' : '#D8BFD8', 'tomato' : '#FF6347', 'turquoise' : '#40E0D0', 'violet' : '#EE82EE', 'wheat' : '#F5DEB3', 'white' : '#FFFFFF', 'whitesmoke' : '#F5F5F5', 'yellow' : '#FFFF00', 'yellowgreen' : '#9ACD32', } # add british equivs for k, v in cnames.items(): if k.find('gray')>=0: k = k.replace('gray', 'grey') cnames[k] = v def is_color_like(c): 'Return *True* if *c* can be converted to *RGB*' try: colorConverter.to_rgb(c) return True except ValueError: return False def rgb2hex(rgb): 'Given a len 3 rgb tuple of 0-1 floats, return the hex string' return '#%02x%02x%02x' % tuple([round(val*255) for val in rgb]) hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z") def hex2color(s): """ Take a hex string *s* and return the corresponding rgb 3-tuple Example: #efefef -> (0.93725, 0.93725, 0.93725) """ if not isinstance(s, basestring): raise TypeError('hex2color requires a string argument') if hexColorPattern.match(s) is None: raise ValueError('invalid hex color string "%s"' % s) return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])]) class ColorConverter: """ Provides methods for converting color specifications to *RGB* or *RGBA* Caching is used for more efficient conversion upon repeated calls with the same argument. Ordinarily only the single instance instantiated in this module, *colorConverter*, is needed. """ colors = { 'b' : (0.0, 0.0, 1.0), 'g' : (0.0, 0.5, 0.0), 'r' : (1.0, 0.0, 0.0), 'c' : (0.0, 0.75, 0.75), 'm' : (0.75, 0, 0.75), 'y' : (0.75, 0.75, 0), 'k' : (0.0, 0.0, 0.0), 'w' : (1.0, 1.0, 1.0), } cache = {} def to_rgb(self, arg): """ Returns an *RGB* tuple of three floats from 0-1. 
*arg* can be an *RGB* or *RGBA* sequence or a string in any of several forms: 1) a letter from the set 'rgbcmykw' 2) a hex color string, like '#00FFFF' 3) a standard name, like 'aqua' 4) a float, like '0.4', indicating gray on a 0-1 scale if *arg* is *RGBA*, the *A* will simply be discarded. """ try: return self.cache[arg] except KeyError: pass except TypeError: # could be unhashable rgb seq arg = tuple(arg) try: return self.cache[arg] except KeyError: pass except TypeError: raise ValueError( 'to_rgb: arg "%s" is unhashable even inside a tuple' % (str(arg),)) try: if cbook.is_string_like(arg): color = self.colors.get(arg, None) if color is None: str1 = cnames.get(arg, arg) if str1.startswith('#'): color = hex2color(str1) else: fl = float(arg) if fl < 0 or fl > 1: raise ValueError( 'gray (string) must be in range 0-1') color = tuple([fl]*3) elif cbook.iterable(arg): if len(arg) > 4 or len(arg) < 3: raise ValueError( 'sequence length is %d; must be 3 or 4'%len(arg)) color = tuple(arg[:3]) if [x for x in color if (float(x) < 0) or (x > 1)]: # This will raise TypeError if x is not a number. raise ValueError('number in rbg sequence outside 0-1 range') else: raise ValueError('cannot convert argument to rgb sequence') self.cache[arg] = color except (KeyError, ValueError, TypeError), exc: raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc)) # Error messages could be improved by handling TypeError # separately; but this should be rare and not too hard # for the user to figure out as-is. return color def to_rgba(self, arg, alpha=None): """ Returns an *RGBA* tuple of four floats from 0-1. For acceptable values of *arg*, see :meth:`to_rgb`. If *arg* is an *RGBA* sequence and *alpha* is not *None*, *alpha* will replace the original *A*. """ try: if not cbook.is_string_like(arg) and cbook.iterable(arg): if len(arg) == 4: if [x for x in arg if (float(x) < 0) or (x > 1)]: # This will raise TypeError if x is not a number. raise ValueError('number in rbga sequence outside 0-1 range') if alpha is None: return tuple(arg) if alpha < 0.0 or alpha > 1.0: raise ValueError("alpha must be in range 0-1") return arg[0], arg[1], arg[2], arg[3] * alpha r,g,b = arg[:3] if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]: raise ValueError('number in rbg sequence outside 0-1 range') else: r,g,b = self.to_rgb(arg) if alpha is None: alpha = 1.0 return r,g,b,alpha except (TypeError, ValueError), exc: raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc)) def to_rgba_array(self, c, alpha=None): """ Returns a numpy array of *RGBA* tuples. Accepts a single mpl color spec or a sequence of specs. Special case to handle "no color": if *c* is "none" (case-insensitive), then an empty array will be returned. Same for an empty list. """ try: if c.lower() == 'none': return np.zeros((0,4), dtype=np.float_) except AttributeError: pass if len(c) == 0: return np.zeros((0,4), dtype=np.float_) try: result = np.array([self.to_rgba(c, alpha)], dtype=np.float_) except ValueError: if isinstance(c, np.ndarray): if c.ndim != 2 and c.dtype.kind not in 'SU': raise ValueError("Color array must be two-dimensional") result = np.zeros((len(c), 4)) for i, cc in enumerate(c): result[i] = self.to_rgba(cc, alpha) # change in place return np.asarray(result, np.float_) colorConverter = ColorConverter() def makeMappingArray(N, data): """Create an *N* -element 1-d lookup table *data* represented by a list of x,y0,y1 mapping correspondences. 
Each element in this list represents how a value between 0 and 1 (inclusive) represented by x is mapped to a corresponding value between 0 and 1 (inclusive). The two values of y are to allow for discontinuous mapping functions (say as might be found in a sawtooth) where y0 represents the value of y for values of x <= to that given, and y1 is the value to be used for x > than that given). The list must start with x=0, end with x=1, and all values of x must be in increasing order. Values between the given mapping points are determined by simple linear interpolation. The function returns an array "result" where ``result[x*(N-1)]`` gives the closest value for values of x between 0 and 1. """ try: adata = np.array(data) except: raise TypeError("data must be convertable to an array") shape = adata.shape if len(shape) != 2 and shape[1] != 3: raise ValueError("data must be nx3 format") x = adata[:,0] y0 = adata[:,1] y1 = adata[:,2] if x[0] != 0. or x[-1] != 1.0: raise ValueError( "data mapping points must start with x=0. and end with x=1") if np.sometrue(np.sort(x)-x): raise ValueError( "data mapping points must have x in increasing order") # begin generation of lookup table x = x * (N-1) lut = np.zeros((N,), np.float) xind = np.arange(float(N)) ind = np.searchsorted(x, xind)[1:-1] lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1])) * (y0[ind] - y1[ind-1]) + y1[ind-1]) lut[0] = y1[0] lut[-1] = y0[-1] # ensure that the lut is confined to values between 0 and 1 by clipping it np.clip(lut, 0.0, 1.0) #lut = where(lut > 1., 1., lut) #lut = where(lut < 0., 0., lut) return lut class Colormap: """Base class for all scalar to rgb mappings Important methods: * :meth:`set_bad` * :meth:`set_under` * :meth:`set_over` """ def __init__(self, name, N=256): """ Public class attributes: :attr:`N` : number of rgb quantization levels :attr:`name` : name of colormap """ self.name = name self.N = N self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything. self._rgba_under = None self._rgba_over = None self._i_under = N self._i_over = N+1 self._i_bad = N+2 self._isinit = False def __call__(self, X, alpha=1.0, bytes=False): """ *X* is either a scalar or an array (of any dimension). If scalar, a tuple of rgba values is returned, otherwise an array with the new shape = oldshape+(4,). If the X-values are integers, then they are used as indices into the array. If they are floating point, then they must be in the interval (0.0, 1.0). Alpha must be a scalar. If bytes is False, the rgba values will be floats on a 0-1 scale; if True, they will be uint8, 0-255. """ if not self._isinit: self._init() alpha = min(alpha, 1.0) # alpha must be between 0 and 1 alpha = max(alpha, 0.0) self._lut[:-3, -1] = alpha mask_bad = None if not cbook.iterable(X): vtype = 'scalar' xa = np.array([X]) else: vtype = 'array' xma = ma.asarray(X) xa = xma.filled(0) mask_bad = ma.getmask(xma) if xa.dtype.char in np.typecodes['Float']: np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1. # The following clip is fast, and prevents possible # conversion of large positive values to negative integers. if NP_CLIP_OUT: np.clip(xa * self.N, -1, self.N, out=xa) else: xa = np.clip(xa * self.N, -1, self.N) xa = xa.astype(int) # Set the over-range indices before the under-range; # otherwise the under-range values get converted to over-range. 
np.putmask(xa, xa>self.N-1, self._i_over) np.putmask(xa, xa<0, self._i_under) if mask_bad is not None and mask_bad.shape == xa.shape: np.putmask(xa, mask_bad, self._i_bad) if bytes: lut = (self._lut * 255).astype(np.uint8) else: lut = self._lut rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype) lut.take(xa, axis=0, mode='clip', out=rgba) # twice as fast as lut[xa]; # using the clip or wrap mode and providing an # output array speeds it up a little more. if vtype == 'scalar': rgba = tuple(rgba[0,:]) return rgba def set_bad(self, color = 'k', alpha = 1.0): '''Set color to be used for masked values. ''' self._rgba_bad = colorConverter.to_rgba(color, alpha) if self._isinit: self._set_extremes() def set_under(self, color = 'k', alpha = 1.0): '''Set color to be used for low out-of-range values. Requires norm.clip = False ''' self._rgba_under = colorConverter.to_rgba(color, alpha) if self._isinit: self._set_extremes() def set_over(self, color = 'k', alpha = 1.0): '''Set color to be used for high out-of-range values. Requires norm.clip = False ''' self._rgba_over = colorConverter.to_rgba(color, alpha) if self._isinit: self._set_extremes() def _set_extremes(self): if self._rgba_under: self._lut[self._i_under] = self._rgba_under else: self._lut[self._i_under] = self._lut[0] if self._rgba_over: self._lut[self._i_over] = self._rgba_over else: self._lut[self._i_over] = self._lut[self.N-1] self._lut[self._i_bad] = self._rgba_bad def _init(): '''Generate the lookup table, self._lut''' raise NotImplementedError("Abstract class only") def is_gray(self): if not self._isinit: self._init() return (np.alltrue(self._lut[:,0] == self._lut[:,1]) and np.alltrue(self._lut[:,0] == self._lut[:,2])) class LinearSegmentedColormap(Colormap): """Colormap objects based on lookup tables using linear segments. The lookup table is generated using linear interpolation for each primary color, with the 0-1 domain divided into any number of segments. """ def __init__(self, name, segmentdata, N=256): """Create color map from linear mapping segments segmentdata argument is a dictionary with a red, green and blue entries. Each entry should be a list of *x*, *y0*, *y1* tuples, forming rows in a table. Example: suppose you want red to increase from 0 to 1 over the bottom half, green to do the same over the middle half, and blue over the top half. Then you would use:: cdict = {'red': [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)], 'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.0), (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)], 'blue': [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]} Each row in the table for a given color is a sequence of *x*, *y0*, *y1* tuples. In each sequence, *x* must increase monotonically from 0 to 1. For any input value *z* falling between *x[i]* and *x[i+1]*, the output value of a given color will be linearly interpolated between *y1[i]* and *y0[i+1]*:: row i: x y0 y1 / / row i+1: x y0 y1 Hence y0 in the first row and y1 in the last row are never used. .. seealso:: :func:`makeMappingArray` """ self.monochrome = False # True only if all colors in map are identical; # needed for contouring. 
Colormap.__init__(self, name, N) self._segmentdata = segmentdata def _init(self): self._lut = np.ones((self.N + 3, 4), np.float) self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red']) self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green']) self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue']) self._isinit = True self._set_extremes() class ListedColormap(Colormap): """Colormap object generated from a list of colors. This may be most useful when indexing directly into a colormap, but it can also be used to generate special colormaps for ordinary mapping. """ def __init__(self, colors, name = 'from_list', N = None): """ Make a colormap from a list of colors. *colors* a list of matplotlib color specifications, or an equivalent Nx3 floating point array (*N* rgb values) *name* a string to identify the colormap *N* the number of entries in the map. The default is *None*, in which case there is one colormap entry for each element in the list of colors. If:: N < len(colors) the list will be truncated at *N*. If:: N > len(colors) the list will be extended by repetition. """ self.colors = colors self.monochrome = False # True only if all colors in map are identical; # needed for contouring. if N is None: N = len(self.colors) else: if cbook.is_string_like(self.colors): self.colors = [self.colors] * N self.monochrome = True elif cbook.iterable(self.colors): self.colors = list(self.colors) # in case it was a tuple if len(self.colors) == 1: self.monochrome = True if len(self.colors) < N: self.colors = list(self.colors) * N del(self.colors[N:]) else: try: gray = float(self.colors) except TypeError: pass else: self.colors = [gray] * N self.monochrome = True Colormap.__init__(self, name, N) def _init(self): rgb = np.array([colorConverter.to_rgb(c) for c in self.colors], np.float) self._lut = np.zeros((self.N + 3, 4), np.float) self._lut[:-3, :-1] = rgb self._lut[:-3, -1] = 1 self._isinit = True self._set_extremes() class Normalize: """ Normalize a given value to the 0-1 range """ def __init__(self, vmin=None, vmax=None, clip=False): """ If *vmin* or *vmax* is not given, they are taken from the input's minimum and maximum value respectively. If *clip* is *True* and the given value falls outside the range, the returned value will be 0 or 1, whichever is closer. Returns 0 if:: vmin==vmax Works with scalars or arrays, including masked arrays. If *clip* is *True*, masked values are set to 1; otherwise they remain masked. Clipping silently defeats the purpose of setting the over, under, and masked colors in the colormap, so it is likely to lead to surprises; therefore the default is *clip* = *False*. 
""" self.vmin = vmin self.vmax = vmax self.clip = clip def __call__(self, value, clip=None): if clip is None: clip = self.clip if cbook.iterable(value): vtype = 'array' val = ma.asarray(value).astype(np.float) else: vtype = 'scalar' val = ma.array([value]).astype(np.float) self.autoscale_None(val) vmin, vmax = self.vmin, self.vmax if vmin > vmax: raise ValueError("minvalue must be less than or equal to maxvalue") elif vmin==vmax: return 0.0 * val else: if clip: mask = ma.getmask(val) val = ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask) result = (val-vmin) * (1.0/(vmax-vmin)) if vtype == 'scalar': result = result[0] return result def inverse(self, value): if not self.scaled(): raise ValueError("Not invertible until scaled") vmin, vmax = self.vmin, self.vmax if cbook.iterable(value): val = ma.asarray(value) return vmin + val * (vmax - vmin) else: return vmin + value * (vmax - vmin) def autoscale(self, A): ''' Set *vmin*, *vmax* to min, max of *A*. ''' self.vmin = ma.minimum(A) self.vmax = ma.maximum(A) def autoscale_None(self, A): ' autoscale only None-valued vmin or vmax' if self.vmin is None: self.vmin = ma.minimum(A) if self.vmax is None: self.vmax = ma.maximum(A) def scaled(self): 'return true if vmin and vmax set' return (self.vmin is not None and self.vmax is not None) class LogNorm(Normalize): """ Normalize a given value to the 0-1 range on a log scale """ def __call__(self, value, clip=None): if clip is None: clip = self.clip if cbook.iterable(value): vtype = 'array' val = ma.asarray(value).astype(np.float) else: vtype = 'scalar' val = ma.array([value]).astype(np.float) self.autoscale_None(val) vmin, vmax = self.vmin, self.vmax if vmin > vmax: raise ValueError("minvalue must be less than or equal to maxvalue") elif vmin<=0: raise ValueError("values must all be positive") elif vmin==vmax: return 0.0 * val else: if clip: mask = ma.getmask(val) val = ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask) result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin)) if vtype == 'scalar': result = result[0] return result def inverse(self, value): if not self.scaled(): raise ValueError("Not invertible until scaled") vmin, vmax = self.vmin, self.vmax if cbook.iterable(value): val = ma.asarray(value) return vmin * ma.power((vmax/vmin), val) else: return vmin * pow((vmax/vmin), value) class BoundaryNorm(Normalize): ''' Generate a colormap index based on discrete intervals. Unlike :class:`Normalize` or :class:`LogNorm`, :class:`BoundaryNorm` maps values to integers instead of to the interval 0-1. Mapping to the 0-1 interval could have been done via piece-wise linear interpolation, but using integers seems simpler, and reduces the number of conversions back and forth between integer and floating point. ''' def __init__(self, boundaries, ncolors, clip=False): ''' *boundaries* a monotonically increasing sequence *ncolors* number of colors in the colormap to be used If:: b[i] <= v < b[i+1] then v is mapped to color j; as i varies from 0 to len(boundaries)-2, j goes from 0 to ncolors-1. Out-of-range values are mapped to -1 if low and ncolors if high; these are converted to valid indices by :meth:`Colormap.__call__` . 
''' self.clip = clip self.vmin = boundaries[0] self.vmax = boundaries[-1] self.boundaries = np.asarray(boundaries) self.N = len(self.boundaries) self.Ncmap = ncolors if self.N-1 == self.Ncmap: self._interp = False else: self._interp = True def __call__(self, x, clip=None): if clip is None: clip = self.clip x = ma.asarray(x) mask = ma.getmaskarray(x) xx = x.filled(self.vmax+1) if clip: np.clip(xx, self.vmin, self.vmax) iret = np.zeros(x.shape, dtype=np.int16) for i, b in enumerate(self.boundaries): iret[xx>=b] = i if self._interp: iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16) iret[xx<self.vmin] = -1 iret[xx>=self.vmax] = self.Ncmap ret = ma.array(iret, mask=mask) if ret.shape == () and not mask: ret = int(ret) # assume python scalar return ret def inverse(self, value): return ValueError("BoundaryNorm is not invertible") class NoNorm(Normalize): ''' Dummy replacement for Normalize, for the case where we want to use indices directly in a :class:`~matplotlib.cm.ScalarMappable` . ''' def __call__(self, value, clip=None): return value def inverse(self, value): return value # compatibility with earlier class names that violated convention: normalize = Normalize no_norm = NoNorm
agpl-3.0
karstenw/nodebox-pyobjc
examples/Extended Application/matplotlib/examples/api/line_with_text.py
1
2601
""" ======================= Artist within an artist ======================= Show how to override basic methods so an artist can contain another artist. In this case, the line contains a Text instance to label it. """ import numpy as np import matplotlib.pyplot as plt import matplotlib.lines as lines import matplotlib.transforms as mtransforms import matplotlib.text as mtext # nodebox section if __name__ == '__builtin__': # were in nodebox import os import tempfile W = 800 inset = 20 size(W, 600) plt.cla() plt.clf() plt.close('all') def tempimage(): fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False) fname = fob.name fob.close() return fname imgx = 20 imgy = 0 def pltshow(plt, dpi=150): global imgx, imgy temppath = tempimage() plt.savefig(temppath, dpi=dpi) dx,dy = imagesize(temppath) w = min(W,dx) image(temppath,imgx,imgy,width=w) imgy = imgy + dy + 20 os.remove(temppath) size(W, HEIGHT+dy+40) else: def pltshow(mplpyplot): mplpyplot.show() # nodebox section end class MyLine(lines.Line2D): def __init__(self, *args, **kwargs): # we'll update the position when the line data is set self.text = mtext.Text(0, 0, '') lines.Line2D.__init__(self, *args, **kwargs) # we can't access the label attr until *after* the line is # inited self.text.set_text(self.get_label()) def set_figure(self, figure): self.text.set_figure(figure) lines.Line2D.set_figure(self, figure) def set_axes(self, axes): self.text.set_axes(axes) lines.Line2D.set_axes(self, axes) def set_transform(self, transform): # 2 pixel offset texttrans = transform + mtransforms.Affine2D().translate(2, 2) self.text.set_transform(texttrans) lines.Line2D.set_transform(self, transform) def set_data(self, x, y): if len(x): self.text.set_position((x[-1], y[-1])) lines.Line2D.set_data(self, x, y) def draw(self, renderer): # draw my label at the end of the line with 2 pixel offset lines.Line2D.draw(self, renderer) self.text.draw(renderer) # Fixing random state for reproducibility np.random.seed(19680801) fig, ax = plt.subplots() x, y = np.random.rand(2, 20) line = MyLine(x, y, mfc='red', ms=12, label='line label') #line.text.set_text('line label') line.text.set_color('red') line.text.set_fontsize(16) ax.add_line(line) pltshow(plt)
mit
manpen/hypergen
libs/NetworKit/networkit/profiling/stat.py
2
16463
# # file: stat.py # author: Mark Erb # from . import job import math import matplotlib.pyplot as plt from _NetworKit import sort2 from _NetworKit import ranked def sorted(sample): """ returns a sorted list of given numbers """ return sort2(sample) class Stat(job.Job): """ statistical computation object """ def __init__(self, name, params): """ constructor: see PlotJob and .run() """ job.Job.__init__( self, "Stat", name ) self.__params = params def run(self): """ computation """ (sample, sampleSorted, sampleRanked, calculatePie) = self.__params n = len(sample) results = {} results["Properties"] = {} results["Location"] = {} results["Dispersion"] = {} results["Shape"] = {} results["Binning"] = {} results["Distribution"] = {} results["Properties"]["Size"] = n def funcMin(): result = sampleSorted[0] return result results["Location"]["Min"] = min = funcMin() def funcMax(): result = sampleSorted[n-1] return result results["Location"]["Max"] = max = funcMax() def funcBesselsCorrection(): try: result = n / (n-1) except: result = float("nan") return result results["Properties"]["Bessel's Correction"] = besselsCorrection = funcBesselsCorrection() def hoelderMean(sample, p): result = 0 for i in range(n): result += sample[i] ** p result /= n result **= 1 / p return result results["Location"]["Arithmetic Mean"] = arithmeticMean = hoelderMean(sample, 1) results["Location"]["Quadratic Mean"] = quadraticMean = hoelderMean(sample, 2) results["Location"]["Cubic Mean"] = cubicMean = hoelderMean(sample, 3) if min > 0: results["Location"]["Harmonic Mean"] = harmonicMean = hoelderMean(sample, -1) else: results["Location"]["Harmonic Mean"] = harmonicMean = float("nan") def funcArithmeticMeanRank(): result = (n + 1) / 2 return result results["Location"]["Arithmetic Mean (Rank)"] = arithmeticMean_Rank = funcArithmeticMeanRank() def funcUncorrectedVariance(sample, arithmeticMean): result = 0 for i in range(n): result += (sample[i] - arithmeticMean) ** 2 result /= n return result results["Dispersion"]["Uncorrected Variance"] = variance_uncorrected = funcUncorrectedVariance(sample, arithmeticMean) results["Dispersion"]["Uncorrected Variance (Rank)"] = variance_Rank_uncorrected = funcUncorrectedVariance(sampleRanked, arithmeticMean_Rank) def funcVariance(variance_uncorrected): result = variance_uncorrected * besselsCorrection return result results["Dispersion"]["Variance"] = variance = funcVariance(variance_uncorrected) results["Dispersion"]["Variance (Rank)"] = variance_Rank = funcVariance(variance_Rank_uncorrected) def funcStandardDeviation(variance): result = math.sqrt(variance) return result results["Dispersion"]["Standard Deviation"] = s_n = funcStandardDeviation(variance) results["Dispersion"]["Standard Deviation (Rank)"] = s_n_Rank = funcStandardDeviation(variance_Rank) results["Dispersion"]["Uncorrected Standard Deviation"] = s_n_uncorrected = funcStandardDeviation(variance_uncorrected) results["Dispersion"]["Uncorrected Standard Deviation (Rank)"] = s_n_Rank_uncorrected = funcStandardDeviation(variance_Rank_uncorrected) def funcCoefficientOfVariation(s_n, arithmeticMean): result = float("nan") if arithmeticMean != 0: result = s_n / arithmeticMean return result results["Dispersion"]["Coefficient Of Variation"] = c_v = funcCoefficientOfVariation(s_n, arithmeticMean) results["Dispersion"]["Coefficient Of Variation (Rank)"] = c_v_Rank = funcCoefficientOfVariation(s_n_Rank, arithmeticMean_Rank) results["Dispersion"]["Uncorrected Coefficient Of Variation"] = c_v = funcCoefficientOfVariation(s_n_uncorrected, 
arithmeticMean) results["Dispersion"]["Uncorrected Coefficient Of Variation (Rank)"] = c_v_Rank = funcCoefficientOfVariation(s_n_Rank_uncorrected, arithmeticMean_Rank) def funcAlphaQuartile(alpha): k_real = (alpha * n) k = math.floor(k_real) if (k != k_real) or (k < 1): result = sampleSorted[(k-1)+1] else: result = 0.5 * (sampleSorted[(k-1)] + sampleSorted[(k-1)+1]) return result results["Location"]["1st Quartile"] = Q1 = funcAlphaQuartile(0.25) results["Location"]["Median"] = median = funcAlphaQuartile(0.5) results["Location"]["3rd Quartile"] = Q3 = funcAlphaQuartile(0.75) def funcAlphaTrimmedMean(alpha): k = math.floor(alpha * n) i = k+1 result = 0 while (i < n-k+1): result += sampleSorted[(i-1)] i += 1 result /= n - 2*(k) return result results["Location"]["Interquartile Mean"] = IQM = funcAlphaTrimmedMean(0.25) def funcIQR(): result = Q3 - Q1 return result results["Dispersion"]["Interquartile Range"] = IQR = funcIQR() def funcSampleRange(): result = max - min return result results["Dispersion"]["Sample Range"] = sampleRange = funcSampleRange() def funcMidRange(): result = (min + max)/ 2 return result results["Location"]["Mid-Range"] = midRange = funcMidRange() def funcSkewnessYP(): result = float("nan") if s_n != 0: result = 3 * (arithmeticMean - median) / s_n return result results["Shape"]["Skewness YP"] = skewness_yp = funcSkewnessYP() def funcMomentum(p): result = float("nan") if s_n != 0: result = 0 for i in range(n): result += ((sample[i] - arithmeticMean) / s_n) ** p result /= n return result def funcSkewnessM(): result = funcMomentum(3) return result results["Shape"]["Skewness M"] = skewnewss_m = funcSkewnessM() def funcKurtosis(): result = funcMomentum(4) - 3 return result results["Shape"]["Kurtosis"] = kurtosis = funcKurtosis() def funcNumberOfBins(commulative): result = 1 if min < max: if commulative: result = 256 else: result = math.sqrt(n) if (result < 5): result = 5 elif (result > 20): result = 20 return int(result) results["Binning"]["Number Histogram"] = k_Bins_Histogram = funcNumberOfBins(False) k_Bins_CDF = funcNumberOfBins(True) def funcIntervals(numberOfBins): result = [] w = sampleRange / numberOfBins result.append(min) for i in range(1, numberOfBins): result.append(min + w * i) result.append(max if min < max else max+10e-12) return result results["Binning"]["Intervals Histogram"] = intervalsHistogram = funcIntervals(k_Bins_Histogram) intervalsCDF = funcIntervals(k_Bins_CDF) def funcBinAbsoluteFrequencies(numberOfBins, intervals, comulative): result = [] index = 0 for i in range(numberOfBins): result.append(0) for i in range(n): value = sampleSorted[i] while intervals[index + 1] < value: index += 1 if comulative and index > 0: result[index] = result[index-1] result[index] += 1 return result results["Binning"]["Absolute Frequencies Histogram"] = absoluteFrequenciesHistogram = funcBinAbsoluteFrequencies(k_Bins_Histogram, intervalsHistogram, False) absoluteFrequenciesCDF = funcBinAbsoluteFrequencies(k_Bins_CDF, intervalsCDF, True) def funcJoinEmptyBins(k_Bin, intervals, frequencies, commulative): result = k_Bin value = 0 for i in range(k_Bin): if frequencies[k_Bin-i-1] == value: del frequencies[k_Bin-i-1] del intervals[k_Bin-i] result -= 1 if commulative: value = frequencies[k_Bin-i-1] return result results["Binning"]["Number CDF"] = k_Bins_CDF = funcJoinEmptyBins(k_Bins_CDF, intervalsCDF, absoluteFrequenciesCDF, True) results["Binning"]["Absolute Frequencies CDF"] = absoluteFrequenciesCDF results["Binning"]["Intervals CDF"] = intervalsCDF def 
funcBinRelativeFrequencies(absoluteFrequencies): result = [] for H in absoluteFrequencies: result.append(H / n) return result results["Binning"]["Relative Frequencies Histogram"] = relativeFrequenciesHistogram = funcBinRelativeFrequencies(absoluteFrequenciesHistogram) results["Binning"]["Relative Frequencies CDF"] = relativeFrequenciesCDF = funcBinRelativeFrequencies(absoluteFrequenciesCDF) def funcMode(): index = 0 max = 0 for i in range(len(absoluteFrequenciesHistogram)): if absoluteFrequenciesHistogram[i] > max: max = absoluteFrequenciesHistogram[i] index = i result = ((intervalsHistogram[index]+intervalsHistogram[index+1]) / 2, max) return result results["Binning"]["Mode"] = mode = funcMode() def funcLowerOutliers(): lowerBound = Q1 - IQR * 3 upperBound = Q1 - IQR * 1.5 result_lower = min result_upper = min state = 0 for i in range(n): value = sampleSorted[i] if value >= lowerBound and state == 0: result_lower = value state = 1 if value >= upperBound and state == 1: result_upper = value break return (result_lower, result_upper) results["Location"]["Outlier (Lower)"] = funcLowerOutliers() def funcUpperOutliers(): lowerBound = Q3 + IQR * 1.5 upperBound = Q3 + IQR * 3 result_lower = max result_upper = max state = 0 for i in range(n): value = sampleSorted[n-i-1] if value <= upperBound and state == 0: result_upper = value state = 1 if value <= lowerBound and state == 1: result_lower = value break return (result_upper, result_lower) results["Location"]["Outlier (Upper)"] = funcUpperOutliers() if calculatePie: def funcPie(): n = len(sample) sum = 0 for i in range(n): sum += sample[i] min = 0.015 cutSize = 0 cutValue = 0 relativeFrequencies = [0] for i in range(n): value = sampleSorted[i]/sum if value < min: relativeFrequencies[0] += value cutSize += 1 cutValue += value else: relativeFrequencies.append(value) return (relativeFrequencies, cutSize) results["Binning"]["Pie"] = funcPie() # The following code within this class is experimental and under develop # # Chi-Squared-Test <- Correct Binning # For Test-Case Purpose uncomment the following lines # n = 100 # arithmeticMean = 51.05 # s_n = 1.209 # absoluteFrequencies = [5, 11, 35, 29, 13, 7] # intervals = [-10000, 49, 50, 51, 52, 53, 10000] # k_Bins = len(absoluteFrequencies) def funcErf(x): sign = 1 if x >= 0 else -1 x = abs(x) a1 = 0.254829592 a2 = -0.284496736 a3 = 1.421413741 a4 = -1.453152027 a5 = 1.061405429 p = 0.3275911 t = 1.0/(1.0 + p*x) y = 1 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1) * t * math.exp(-x*x) return sign*y def funcDistributionNormal(x): result = 1/2 * (1 + funcErf((x-arithmeticMean)/(math.sqrt(2) * s_n))) return result def funcDistributionExponential(x): result = 1 - math.exp((-1/arithmeticMean) * x) return result def funcDistributionExponentialInverse(x): result = math.ln(1/(1-a))*arithmeticMean return result def funcIncompleteGamma(s, x): if x < 0.0: return 0.0 sc = (1.0 / s) sc *= math.pow(x, s) sc *= math.exp(-x) sum = 1.0 nom = 1.0 denom = 1.0 for i in range(1, 128): nom *= x denom *= s + i sum += (nom / denom) return sum * sc def funcGamma(x): result = (x / math.e) ** x result *= math.sqrt(2 * math.pi / x) result *= (1 + 1/(12 * x*x - 1/10)) ** x return result def funcPValue(criticalValue, degreesOfFreedom): if criticalValue < 0.0 or degreesOfFreedom < 1: return 0.0 k = degreesOfFreedom * 0.5 x = criticalValue * 0.5 if degreesOfFreedom == 2: return math.exp(-x) result = funcIncompleteGamma(k, x) result /= funcGamma(k) return 1-result def funcNumberOfBinsChiSquaredTest(): result = 1 + ln(n)/ln(2) if result > 128: 
result = 128 return int(result) # TODO: # def funcIntervalsChiSquaredTest(inverseFunction, min, max, k_Bins): # bin_size = n / k # result = [] # return result # def funcChiSquaredTest(distribution, numberOfUsedEstimators): # z = 0; # pValue = 0 # for i in range(k_Bins): # p_i = distribution(intervals[i+1]) - distribution(intervals[i]) # hypotheticAbsoluteFrequency = n*p_i # if hypotheticAbsoluteFrequency == 0: # if absoluteFrequencies[i] == 0: # continue # z = float("Inf") # break # d = absoluteFrequencies[i] - hypotheticAbsoluteFrequency # z += d*d / hypotheticAbsoluteFrequency # print(self.getName(), hypotheticAbsoluteFrequency, absoluteFrequencies[i], z) # degreesOfFreedom = (k_Bins - 1) - numberOfUsedEstimators # pValue = funcPValue(z, degreesOfFreedom) # return (z, pValue) # results["Distribution"]["Chi-Square-Test (Normal)"] = funcChiSquaredTest(funcDistributionNormal, 2) # results["Distribution"]["Chi-Square-Test (Exponential)"] = funcChiSquaredTest(funcDistributionExponential, 1) return results class Correlation(job.Job): """ correlation computation object """ def __init__(self, name, params): """ constructor: see PlotJob and .run() """ job.Job.__init__( self, "Correlation", name ) self.__params = params def run(self): """ computation """ (nameB, sample_1, sampleRanked_1, stat_1, sample_2, sampleRanked_2, stat_2) = self.__params n = len(sample_1) assert (n == len(sample_2)), "sample sizes are not equal" results = {} results["Value"] = {} def funcCovariance(sample_1, arithmeticMean_1, sample_2, arithmeticMean_2): result = 0 for i in range(n): result += (sample_1[i]- arithmeticMean_1) * (sample_2[i] - arithmeticMean_2) result /= n return result results["Value"]["Covariance"] = covariance = funcCovariance( sample_1, stat_1["Location"]["Arithmetic Mean"], sample_2, stat_2["Location"]["Arithmetic Mean"] ) results["Value"]["Covariance (Rank)"] = covarianceRanked = funcCovariance( sampleRanked_1, stat_1["Location"]["Arithmetic Mean (Rank)"], sampleRanked_2, stat_2["Location"]["Arithmetic Mean (Rank)"] ) def funcPearsonsCorrelationCoefficient(covariance, uncorrectedStandardDeviation_1, uncorrectedStandardDeviation_2): result = float("nan") if uncorrectedStandardDeviation_1 * uncorrectedStandardDeviation_2 != 0: result = covariance / (uncorrectedStandardDeviation_1 * uncorrectedStandardDeviation_2) return result results["Value"]["Pearson's Correlation Coefficient"] = funcPearsonsCorrelationCoefficient( covariance, stat_1["Dispersion"]["Uncorrected Standard Deviation"], stat_2["Dispersion"]["Uncorrected Standard Deviation"] ) results["Value"]["Spearman's Rank Correlation Coefficient"] = funcPearsonsCorrelationCoefficient( covarianceRanked, stat_1["Dispersion"]["Uncorrected Standard Deviation (Rank)"], stat_2["Dispersion"]["Uncorrected Standard Deviation (Rank)"] ) def funcFechnersCorrelationCoefficent(arithmeticMean_1, arithmeticMean_2): result = 0 for i in range(n): result += math.copysign(1.0, (sample_1[i] - arithmeticMean_1) * (sample_2[i] - arithmeticMean_2)) result /= n return result results["Value"]["Fechner's Correlation Coefficient"] = funcFechnersCorrelationCoefficent( stat_1["Location"]["Arithmetic Mean"], stat_2["Location"]["Arithmetic Mean"] ) def funcHexBinning(sample_1, sample_2): """ binning for scatter plots """ result = {} n = 32 fig = plt.figure() extent = [ stat_1["Location"]["Min"], stat_1["Location"]["Max"], stat_2["Location"]["Min"], stat_2["Location"]["Max"] ] image = plt.hexbin( sample_1, sample_2, gridsize = n, extent = extent ) result["Grid Size"] = n 
result["Absolute Frequencies"] = frequencies = image.get_array() max = 0 for value in frequencies: if max < value: max = value result["Max Frequency"] = max result["Offsets"] = image.get_offsets() result["Paths"] = image.get_paths()[0] plt.close(fig) return result results["Binning"] = funcHexBinning( sample_1, sample_2 ) return (nameB, results)
gpl-3.0
tomolaf/trading-with-python
sandbox/spreadCalculations.py
78
1496
'''
Created on 28 okt 2011

@author: jev
'''

from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os

symbols = ['SPY','IWM']

y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)

df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)

res = readBiggerScreener('CointPairs.csv')

#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp

#The dollar-neutral ratio is about 1 * IWM - 1.7 * IWM. You will get the spread = zero (or probably very near zero)

#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()

#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')

#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()

quote = yahooFinance.getQuote(symbols)
print quote

s = Spread(symbols, histClose=df, estimateBeta=False)
s.setLast(quote['last'])

s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()

fig = plt.figure(2)  # only matplotlib.pyplot is imported, so a bare figure() would raise NameError
s.plot()
bsd-3-clause
perslab/DEPICT
src/python/depict.py
1
9172
#!/usr/bin/env python2.7 import warnings warnings.filterwarnings("ignore") import os,sys,pdb,logging,subprocess,ConfigParser,argparse from glob import glob import pandas as pd # Read path to config file parser = argparse.ArgumentParser(description='DEPICT') parser.add_argument('cfg_file', metavar='DEPICT configuration file', type=str, help='DEPICT configuration file, all user inputs are specified in this file') args = parser.parse_args() # Paths path_to_depict = os.path.realpath(__file__).split('/') depict_path = "/".join(path_to_depict[0:(len(path_to_depict)-3)]) data_path = "data/" sys.path.append("%s/src/python"%depict_path) from depict_library import write_depict_loci,write_plink_input,run_depict # Parse the config file cfg = ConfigParser.ConfigParser() cfg_file_found = cfg.read(args.cfg_file) analysis_path = cfg.get("PATHS",'analysis_path') step_write_depict_loci = cfg.getboolean("DEPICT SETTINGS", "step_construct_depict_loci") step_depict_geneprio = cfg.getboolean("DEPICT SETTINGS", "step_depict_geneprio") step_depict_gsea = cfg.getboolean("DEPICT SETTINGS", "step_depict_gsea") step_depict_tissueenrichment = cfg.getboolean("DEPICT SETTINGS", "step_depict_tissueenrichment") # GWAS summary statistics input file parameters (only autosomal SNPs are included) association_pvalue_cutoff = cfg.getfloat("GWAS FILE SETTINGS",'association_pvalue_cutoff') filename = cfg.get("GWAS FILE SETTINGS",'gwas_summary_statistics_file') label = cfg.get("GWAS FILE SETTINGS",'label_for_output_files') pvalue_col_name = cfg.get("GWAS FILE SETTINGS",'pvalue_col_name') marker_col_name = cfg.get("GWAS FILE SETTINGS",'marker_col_name') if cfg.get("GWAS FILE SETTINGS",'marker_col_name') != '' else None chr_col_name = cfg.get("GWAS FILE SETTINGS",'chr_col_name') if cfg.get("GWAS FILE SETTINGS",'chr_col_name') != '' else None pos_col_name = cfg.get("GWAS FILE SETTINGS",'pos_col_name') if cfg.get("GWAS FILE SETTINGS",'pos_col_name') != '' else None sep = cfg.get("GWAS FILE SETTINGS",'separator') # PLINK and genotype files plink_executable = cfg.get("PLINK SETTINGS",'plink_executable') genotype_data_plink_prefix = "{}/{}".format(depict_path,cfg.get("PLINK SETTINGS",'genotype_data_plink_prefix')) if cfg.get("PLINK SETTINGS",'genotype_data_plink_prefix').startswith("data/") else cfg.get("PLINK SETTINGS",'genotype_data_plink_prefix') plink_clumping_snp_column_header = cfg.get("PLINK SETTINGS",'plink_clumping_snp_column_header') association_pvalue_cutoff_column_header = cfg.get("PLINK SETTINGS",'association_pvalue_cutoff_column_header') plink_clumping_distance = cfg.getint("PLINK SETTINGS",'plink_clumping_distance') plink_clumping_r2 = cfg.getfloat("PLINK SETTINGS",'plink_clumping_r2') # Locus construction paramenters collection_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS",'collection_file')) if cfg.get("MISC SETTINGS",'collection_file').startswith("data/") else cfg.get("MISC SETTINGS",'collection_file') locus_file = "%s/%s_loci.txt"%(analysis_path,label) number_random_runs = cfg.getint("MISC SETTINGS",'number_random_runs') background_plink_clumping_pvalue = cfg.getfloat("MISC SETTINGS",'background_plink_clumping_pvalue') background_data_path = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","background_data_path")) if cfg.get("MISC SETTINGS","background_data_path").startswith("data/") else cfg.get("MISC SETTINGS","background_data_path") null_gwas_prefix = "{}/null_gwas/dgi_1kg_hg19_null".format(background_data_path) req_fraction_of_background_files = cfg.getfloat("MISC 
SETTINGS",'req_fraction_of_background_files') # DEPICT parameters java_executable = "java" depict_jar = "%s/src/java/dist/Depict.jar"%depict_path ncores = cfg.getint("MISC SETTINGS",'number_of_threads') heap_size_in_mb = cfg.getint("MISC SETTINGS",'heap_size_in_mb') max_top_genes_for_gene_set = cfg.getint("MISC SETTINGS",'max_top_genes_for_gene_set') nr_repititions = cfg.getint("MISC SETTINGS",'nr_repititions') nr_permutations = cfg.getint("MISC SETTINGS",'nr_permutations') mhc_start_bp = cfg.getint("MISC SETTINGS",'mhc_start_bp') mhc_end_bp = cfg.getint("MISC SETTINGS",'mhc_end_bp') reconstituted_genesets_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","reconstituted_genesets_file")) if cfg.get("MISC SETTINGS",'reconstituted_genesets_file').startswith("data/") else cfg.get("MISC SETTINGS",'reconstituted_genesets_file') tissue_expression_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","tissue_expression_file")) if cfg.get("MISC SETTINGS",'tissue_expression_file').startswith("data/") else cfg.get("MISC SETTINGS",'tissue_expression_file') tissue_mapping_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","tissue_mapping_file")) if cfg.get("MISC SETTINGS",'tissue_mapping_file').startswith("data/") else cfg.get("MISC SETTINGS",'tissue_mapping_file') depict_gene_annotation_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","depict_gene_annotation_file")) if cfg.get("MISC SETTINGS","depict_gene_annotation_file").startswith("data/") else cfg.get("MISC SETTINGS","depict_gene_annotation_file") go_mapping_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","go_mapping_file")) if cfg.get("MISC SETTINGS","go_mapping_file").startswith("data/") else cfg.get("MISC SETTINGS","go_mapping_file") mgi_mapping_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","mgi_mapping_file")) if cfg.get("MISC SETTINGS","mgi_mapping_file").startswith("data/") else cfg.get("MISC SETTINGS","mgi_mapping_file") inweb_mapping_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","inweb_mapping_file")) if cfg.get("MISC SETTINGS","inweb_mapping_file").startswith("data/") else cfg.get("MISC SETTINGS","inweb_mapping_file") eqtl_mapping_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","eqtl_mapping_file")) if cfg.get("MISC SETTINGS","eqtl_mapping_file").startswith("data/") else cfg.get("MISC SETTINGS","eqtl_mapping_file") eqtl_file = "{}/{}".format(depict_path,cfg.get("MISC SETTINGS","eqtl_file")) if cfg.get("MISC SETTINGS","eqtl_file").startswith("data/") else cfg.get("MISC SETTINGS","eqtl_file") prioritize_genes_outside_input_loci= cfg.getboolean("MISC SETTINGS",'prioritize_genes_outside_input_loci') leave_out_chr = cfg.get("MISC SETTINGS",'leave_out_chr') export_cofunc_and_exit = cfg.getboolean("MISC SETTINGS",'export_cofunc_and_exit') export_cofunc_type = cfg.get("MISC SETTINGS",'export_cofunc_type') export_cofunc_label = cfg.get("MISC SETTINGS",'export_cofunc_label') # Construct directory if it does not exist if not os.path.exists(analysis_path): print "\nCreating directory were results will be saved ({})".format(analysis_path) os.makedirs(analysis_path) else: print "\nWill store result files to {}".format(analysis_path) # Logging and contact log_file = '%s/%s_log.txt'%(analysis_path,label) logging.basicConfig(filename=log_file, filemode='w', level=logging.INFO) depict_contact_email = "tunepers@broadinstitute.org" # Background loci folder background_loci_dir_suffix = "nperm{numruns}_kb{dis}_rsq{rsq}_mhc{mhcsta}-{mhcend}_col{collection}/".format(\ 
numruns=number_random_runs,\ dis=plink_clumping_distance,\ rsq=plink_clumping_r2,\ mhcsta=mhc_start_bp,\ mhcend=mhc_end_bp,\ collection=collection_file.split("/")[-1].replace(".txt.gz","").replace('_','-')) # Read PLINK index SNPs and write DEPICT locus file if step_write_depict_loci: map_log = write_plink_input(analysis_path, filename, label, marker_col_name, pvalue_col_name, chr_col_name, pos_col_name, sep, genotype_data_plink_prefix, association_pvalue_cutoff) logging.info(map_log) loci_log = write_depict_loci(analysis_path,label,association_pvalue_cutoff,collection_file,depict_gene_annotation_file,locus_file,mhc_start_bp,mhc_end_bp,plink_executable,genotype_data_plink_prefix,plink_clumping_distance, plink_clumping_r2,"%s_depict.tab"%label,number_random_runs,background_plink_clumping_pvalue,plink_clumping_snp_column_header,association_pvalue_cutoff_column_header,null_gwas_prefix,depict_contact_email,req_fraction_of_background_files,background_loci_dir_suffix,background_data_path) logging.info(loci_log) # Run DEPICT if step_depict_geneprio or step_depict_gsea or step_depict_tissueenrichment: # Check that locus file exists and get the number of loci if os.path.isfile(locus_file): background_loci_dir = "{}/nloci{}_{}".format(background_data_path,pd.read_csv(locus_file).shape[0],background_loci_dir_suffix) else: sys.exit("Exiting: Locus file ({}) not found. Please make sure DEPICT loci have been constructed.".format(locus_file)) depict_log = run_depict(java_executable, depict_jar, background_loci_dir, locus_file, label, step_depict_geneprio, step_depict_gsea, step_depict_tissueenrichment, ncores, analysis_path, reconstituted_genesets_file, depict_gene_annotation_file, tissue_expression_file, max_top_genes_for_gene_set, nr_repititions, nr_permutations, mhc_start_bp, mhc_end_bp, go_mapping_file, mgi_mapping_file, inweb_mapping_file, tissue_mapping_file, eqtl_mapping_file, eqtl_file, heap_size_in_mb, prioritize_genes_outside_input_loci, leave_out_chr, export_cofunc_and_exit, export_cofunc_type, export_cofunc_label) logging.info(depict_log)
gpl-3.0
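The run script above repeats two idioms for almost every configuration field: optional column names are read as `cfg.get(section, key) if cfg.get(section, key) != '' else None`, and any path that starts with "data/" is prefixed with the DEPICT installation directory. The sketch below factors those two idioms into small helpers; the helper names and the commented usage are illustrative only and are not part of DEPICT itself.

def get_optional(cfg, section, key):
    # An empty config field means "not provided"; the script maps it to None.
    value = cfg.get(section, key)
    return value if value != '' else None


def resolve_path(cfg, section, key, depict_path):
    # Paths pointing into the bundled "data/" directory are resolved against
    # the DEPICT installation directory; any other path is used verbatim.
    value = cfg.get(section, key)
    return "{}/{}".format(depict_path, value) if value.startswith("data/") else value

# Hypothetical usage, mirroring the assignments in the script above:
#   chr_col_name   = get_optional(cfg, "GWAS FILE SETTINGS", "chr_col_name")
#   collection_file = resolve_path(cfg, "MISC SETTINGS", "collection_file", depict_path)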
gclenaghan/scikit-learn
sklearn/mixture/tests/test_dpgmm.py
261
4490
import unittest
import sys

import numpy as np

from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO

np.seterr(all='warn')


def test_class_weights():
    # check that the class weights are updated
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
        dpgmm.fit(X)
        # get indices of components that are used:
        indices = np.unique(dpgmm.predict(X))
        active = np.zeros(10, dtype=np.bool)
        active[indices] = True
        # used components are important
        assert_array_less(.1, dpgmm.weights_[active])
        # others are not
        assert_array_less(dpgmm.weights_[~active], .05)


def test_verbose_boolean():
    # checks that the output for the verbose output is the same
    # for the flag values '1' and 'True'
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
                           n_iter=50, verbose=True)
        dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
                          n_iter=50, verbose=1)

        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            # generate output with the boolean flag
            dpgmm_bool.fit(X)
            verbose_output = sys.stdout
            verbose_output.seek(0)
            bool_output = verbose_output.readline()
            # generate output with the int flag
            dpgmm_int.fit(X)
            verbose_output = sys.stdout
            verbose_output.seek(0)
            int_output = verbose_output.readline()
            assert_equal(bool_output, int_output)
        finally:
            sys.stdout = old_stdout


def test_verbose_first_level():
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
                      verbose=1)

        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            dpgmm.fit(X)
        finally:
            sys.stdout = old_stdout


def test_verbose_second_level():
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
                      verbose=2)

        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            dpgmm.fit(X)
        finally:
            sys.stdout = old_stdout


def test_log_normalize():
    v = np.array([0.1, 0.8, 0.01, 0.09])
    a = np.log(2 * v)
    assert np.allclose(v, log_normalize(a), rtol=0.01)


def do_model(self, **kwds):
    return VBGMM(verbose=False, **kwds)


class DPGMMTester(GMMTester):
    model = DPGMM
    do_test_eval = False

    def score(self, g, train_obs):
        _, z = g.score_samples(train_obs)
        return g.lower_bound(train_obs, z)


class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'spherical'
    setUp = GMMTester._setUp


class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'diag'
    setUp = GMMTester._setUp


class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'tied'
    setUp = GMMTester._setUp


class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'full'
    setUp = GMMTester._setUp


class VBGMMTester(GMMTester):
    model = do_model
    do_test_eval = False

    def score(self, g, train_obs):
        _, z = g.score_samples(train_obs)
        return g.lower_bound(train_obs, z)


class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'spherical'
    setUp = GMMTester._setUp


class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'diag'
    setUp = GMMTester._setUp


class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'tied'
    setUp = GMMTester._setUp


class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'full'
    setUp = GMMTester._setUp
bsd-3-clause
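test_log_normalize above only checks that the utility recovers a probability vector from scaled log-weights. Below is a minimal numpy sketch of that kind of exp-normalization, written with the usual log-sum-exp shift for numerical stability; it illustrates the idea and is not the sklearn.mixture.dpgmm implementation.

import numpy as np

def exp_normalize(log_w):
    # Shift by the maximum before exponentiating so large magnitudes do not
    # overflow, then normalize so the weights sum to one. Any constant scaling
    # of the underlying weights cancels out, which is what the test exercises.
    log_w = np.asarray(log_w, dtype=float)
    w = np.exp(log_w - log_w.max())
    return w / w.sum()

v = np.array([0.1, 0.8, 0.01, 0.09])
assert np.allclose(exp_normalize(np.log(2 * v)), v)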
musically-ut/statsmodels
statsmodels/stats/outliers_influence.py
27
25639
# -*- coding: utf-8 -*- """Influence and Outlier Measures Created on Sun Jan 29 11:16:09 2012 Author: Josef Perktold License: BSD-3 """ from statsmodels.compat.python import lzip from collections import defaultdict import numpy as np from statsmodels.regression.linear_model import OLS from statsmodels.tools.decorators import cache_readonly from statsmodels.stats.multitest import multipletests from statsmodels.tools.tools import maybe_unwrap_results # outliers test convenience wrapper def outlier_test(model_results, method='bonf', alpha=.05, labels=None, order=False): """ Outlier Tests for RegressionResults instances. Parameters ---------- model_results : RegressionResults instance Linear model results method : str - `bonferroni` : one-step correction - `sidak` : one-step correction - `holm-sidak` : - `holm` : - `simes-hochberg` : - `hommel` : - `fdr_bh` : Benjamini/Hochberg - `fdr_by` : Benjamini/Yekutieli See `statsmodels.stats.multitest.multipletests` for details. alpha : float familywise error rate order : bool Whether or not to order the results by the absolute value of the studentized residuals. If labels are provided they will also be sorted. Returns ------- table : ndarray or DataFrame Returns either an ndarray or a DataFrame if labels is not None. Will attempt to get labels from model_results if available. The columns are the Studentized residuals, the unadjusted p-value, and the corrected p-value according to method. Notes ----- The unadjusted p-value is stats.t.sf(abs(resid), df) where df = df_resid - 1. """ from scipy import stats # lazy import infl = getattr(model_results, 'get_influence', None) if infl is None: results = maybe_unwrap_results(model_results) raise AttributeError("model_results object %s does not have a " "get_influence method." % results.__class__.__name__) resid = infl().resid_studentized_external if order: idx = np.abs(resid).argsort()[::-1] resid = resid[idx] if labels is not None: labels = np.array(labels)[idx].tolist() df = model_results.df_resid - 1 unadj_p = stats.t.sf(np.abs(resid), df) * 2 adj_p = multipletests(unadj_p, alpha=alpha, method=method) data = np.c_[resid, unadj_p, adj_p[1]] if labels is None: labels = getattr(model_results.model.data, 'row_labels', None) if labels is not None: from pandas import DataFrame return DataFrame(data, columns=['student_resid', 'unadj_p', method+"(p)"], index=labels) return data #influence measures def reset_ramsey(res, degree=5): '''Ramsey's RESET specification test for linear models This is a general specification test, for additional non-linear effects in a model. Notes ----- The test fits an auxiliary OLS regression where the design matrix, exog, is augmented by powers 2 to degree of the fitted values. Then it performs an F-test whether these additional terms are significant. If the p-value of the f-test is below a threshold, e.g. 0.1, then this indicates that there might be additional non-linear effects in the model and that the linear model is mis-specified. 
References ---------- http://en.wikipedia.org/wiki/Ramsey_RESET_test ''' order = degree + 1 k_vars = res.model.exog.shape[1] #vander without constant and x: y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2] #drop constant exog = np.column_stack((res.model.exog, y_fitted_vander)) res_aux = OLS(res.model.endog, exog).fit() #r_matrix = np.eye(degree, exog.shape[1], k_vars) r_matrix = np.eye(degree-1, exog.shape[1], k_vars) #df1 = degree - 1 #df2 = exog.shape[0] - degree - res.df_model (without constant) return res_aux.f_test(r_matrix) #, r_matrix, res_aux def variance_inflation_factor(exog, exog_idx): '''variance inflation factor, VIF, for one exogenous variable The variance inflation factor is a measure for the increase of the variance of the parameter estimates if an additional variable, given by exog_idx is added to the linear regression. It is a measure for multicollinearity of the design matrix, exog. One recommendation is that if VIF is greater than 5, then the explanatory variable given by exog_idx is highly collinear with the other explanatory variables, and the parameter estimates will have large standard errors because of this. Parameters ---------- exog : ndarray, (nobs, k_vars) design matrix with all explanatory variables, as for example used in regression exog_idx : int index of the exogenous variable in the columns of exog Returns ------- vif : float variance inflation factor Notes ----- This function does not save the auxiliary regression. See Also -------- xxx : class for regression diagnostics TODO: doesn't exist yet References ---------- http://en.wikipedia.org/wiki/Variance_inflation_factor ''' k_vars = exog.shape[1] x_i = exog[:, exog_idx] mask = np.arange(k_vars) != exog_idx x_noti = exog[:, mask] r_squared_i = OLS(x_i, x_noti).fit().rsquared vif = 1. / (1. - r_squared_i) return vif class OLSInfluence(object): '''class to calculate outlier and influence measures for OLS result Parameters ---------- results : Regression Results instance currently assumes the results are from an OLS regression Notes ----- One part of the results can be calculated without any auxiliary regression (some of which have the `_internal` postfix in the name. Other statistics require leave-one-observation-out (LOOO) auxiliary regression, and will be slower (mainly results with `_external` postfix in the name). The auxiliary LOOO regression only the required results are stored. Using the LOO measures is currently only recommended if the data set is not too large. One possible approach for LOOO measures would be to identify possible problem observations with the _internal measures, and then run the leave-one-observation-out only with observations that are possible outliers. (However, this is not yet available in an automized way.) This should be extended to general least squares. The leave-one-variable-out (LOVO) auxiliary regression are currently not used. 
''' def __init__(self, results): #check which model is allowed self.results = maybe_unwrap_results(results) self.nobs, self.k_vars = results.model.exog.shape self.endog = results.model.endog self.exog = results.model.exog self.model_class = results.model.__class__ self.sigma_est = np.sqrt(results.mse_resid) self.aux_regression_exog = {} self.aux_regression_endog = {} @cache_readonly def hat_matrix_diag(self): '''(cached attribute) diagonal of the hat_matrix for OLS Notes ----- temporarily calculated here, this should go to model class ''' return (self.exog * self.results.model.pinv_wexog.T).sum(1) @cache_readonly def resid_press(self): '''(cached attribute) PRESS residuals ''' hii = self.hat_matrix_diag return self.results.resid / (1 - hii) @cache_readonly def influence(self): '''(cached attribute) influence measure matches the influence measure that gretl reports u * h / (1 - h) where u are the residuals and h is the diagonal of the hat_matrix ''' hii = self.hat_matrix_diag return self.results.resid * hii / (1 - hii) @cache_readonly def hat_diag_factor(self): '''(cached attribute) factor of diagonal of hat_matrix used in influence this might be useful for internal reuse h / (1 - h) ''' hii = self.hat_matrix_diag return hii / (1 - hii) @cache_readonly def ess_press(self): '''(cached attribute) error sum of squares of PRESS residuals ''' return np.dot(self.resid_press, self.resid_press) @cache_readonly def resid_studentized_internal(self): '''(cached attribute) studentized residuals using variance from OLS this uses sigma from original estimate does not require leave one out loop ''' return self.get_resid_studentized_external(sigma=None) #return self.results.resid / self.sigma_est @cache_readonly def resid_studentized_external(self): '''(cached attribute) studentized residuals using LOOO variance this uses sigma from leave-one-out estimates requires leave one out loop for observations ''' sigma_looo = np.sqrt(self.sigma2_not_obsi) return self.get_resid_studentized_external(sigma=sigma_looo) def get_resid_studentized_external(self, sigma=None): '''calculate studentized residuals Parameters ---------- sigma : None or float estimate of the standard deviation of the residuals. If None, then the estimate from the regression results is used. Returns ------- stzd_resid : ndarray studentized residuals Notes ----- studentized residuals are defined as :: resid / sigma / np.sqrt(1 - hii) where resid are the residuals from the regression, sigma is an estimate of the standard deviation of the residuals, and hii is the diagonal of the hat_matrix. ''' hii = self.hat_matrix_diag if sigma is None: sigma2_est = self.results.mse_resid #can be replace by different estimators of sigma sigma = np.sqrt(sigma2_est) return self.results.resid / sigma / np.sqrt(1 - hii) @cache_readonly def dffits_internal(self): '''(cached attribute) dffits measure for influence of an observation based on resid_studentized_internal uses original results, no nobs loop ''' #TODO: do I want to use different sigma estimate in # resid_studentized_external # -> move definition of sigma_error to the __init__ hii = self.hat_matrix_diag dffits_ = self.resid_studentized_internal * np.sqrt(hii / (1 - hii)) dffits_threshold = 2 * np.sqrt(self.k_vars * 1. 
/ self.nobs) return dffits_, dffits_threshold @cache_readonly def dffits(self): '''(cached attribute) dffits measure for influence of an observation based on resid_studentized_external, uses results from leave-one-observation-out loop It is recommended that observations with dffits large than a threshold of 2 sqrt{k / n} where k is the number of parameters, should be investigated. Returns ------- dffits: float dffits_threshold : float References ---------- `Wikipedia <http://en.wikipedia.org/wiki/DFFITS>`_ ''' #TODO: do I want to use different sigma estimate in # resid_studentized_external # -> move definition of sigma_error to the __init__ hii = self.hat_matrix_diag dffits_ = self.resid_studentized_external * np.sqrt(hii / (1 - hii)) dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs) return dffits_, dffits_threshold @cache_readonly def dfbetas(self): '''(cached attribute) dfbetas uses results from leave-one-observation-out loop ''' dfbetas = self.results.params - self.params_not_obsi#[None,:] dfbetas /= np.sqrt(self.sigma2_not_obsi[:,None]) dfbetas /= np.sqrt(np.diag(self.results.normalized_cov_params)) return dfbetas @cache_readonly def sigma2_not_obsi(self): '''(cached attribute) error variance for all LOOO regressions This is 'mse_resid' from each auxiliary regression. uses results from leave-one-observation-out loop ''' return np.asarray(self._res_looo['mse_resid']) @cache_readonly def params_not_obsi(self): '''(cached attribute) parameter estimates for all LOOO regressions uses results from leave-one-observation-out loop ''' return np.asarray(self._res_looo['params']) @cache_readonly def det_cov_params_not_obsi(self): '''(cached attribute) determinant of cov_params of all LOOO regressions uses results from leave-one-observation-out loop ''' return np.asarray(self._res_looo['det_cov_params']) @cache_readonly def cooks_distance(self): '''(cached attribute) Cooks distance uses original results, no nobs loop ''' hii = self.hat_matrix_diag #Eubank p.93, 94 cooks_d2 = self.resid_studentized_internal**2 / self.k_vars cooks_d2 *= hii / (1 - hii) from scipy import stats #alpha = 0.1 #print stats.f.isf(1-alpha, n_params, res.df_modelwc) pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid) return cooks_d2, pvals @cache_readonly def cov_ratio(self): '''(cached attribute) covariance ratio between LOOO and original This uses determinant of the estimate of the parameter covariance from leave-one-out estimates. requires leave one out loop for observations ''' #don't use inplace division / because then we change original cov_ratio = (self.det_cov_params_not_obsi / np.linalg.det(self.results.cov_params())) return cov_ratio @cache_readonly def resid_var(self): '''(cached attribute) estimate of variance of the residuals :: sigma2 = sigma2_OLS * (1 - hii) where hii is the diagonal of the hat matrix ''' #TODO:check if correct outside of ols return self.results.mse_resid * (1 - self.hat_matrix_diag) @cache_readonly def resid_std(self): '''(cached attribute) estimate of standard deviation of the residuals See Also -------- resid_var ''' return np.sqrt(self.resid_var) def _ols_xnoti(self, drop_idx, endog_idx='endog', store=True): '''regression results from LOVO auxiliary regression with cache The result instances are stored, which could use a large amount of memory if the datasets are large. There are too many combinations to store them all, except for small problems. 
Parameters ---------- drop_idx : int index of exog that is dropped from the regression endog_idx : 'endog' or int If 'endog', then the endogenous variable of the result instance is regressed on the exogenous variables, excluding the one at drop_idx. If endog_idx is an integer, then the exog with that index is regressed with OLS on all other exogenous variables. (The latter is the auxiliary regression for the variance inflation factor.) this needs more thought, memory versus speed not yet used in any other parts, not sufficiently tested ''' #reverse the structure, access store, if fail calculate ? #this creates keys in store even if store = false ! bug if endog_idx == 'endog': stored = self.aux_regression_endog if hasattr(stored, drop_idx): return stored[drop_idx] x_i = self.results.model.endog else: #nested dictionary try: self.aux_regression_exog[endog_idx][drop_idx] except KeyError: pass stored = self.aux_regression_exog[endog_idx] stored = {} x_i = self.exog[:, endog_idx] k_vars = self.exog.shape[1] mask = np.arange(k_vars) != drop_idx x_noti = self.exog[:, mask] res = OLS(x_i, x_noti).fit() if store: stored[drop_idx] = res return res def _get_drop_vari(self, attributes): '''regress endog on exog without one of the variables This uses a k_vars loop, only attributes of the OLS instance are stored. Parameters ---------- attributes : list of strings These are the names of the attributes of the auxiliary OLS results instance that are stored and returned. not yet used ''' from statsmodels.sandbox.tools.cross_val import LeaveOneOut endog = self.results.model.endog exog = self.exog cv_iter = LeaveOneOut(self.k_vars) res_loo = defaultdict(list) for inidx, outidx in cv_iter: for att in attributes: res_i = self.model_class(endog, exog[:,inidx]).fit() res_loo[att].append(getattr(res_i, att)) return res_loo @cache_readonly def _res_looo(self): '''collect required results from the LOOO loop all results will be attached. currently only 'params', 'mse_resid', 'det_cov_params' are stored regresses endog on exog dropping one observation at a time this uses a nobs loop, only attributes of the OLS instance are stored. ''' from statsmodels.sandbox.tools.cross_val import LeaveOneOut get_det_cov_params = lambda res: np.linalg.det(res.cov_params()) endog = self.endog exog = self.exog params = np.zeros(exog.shape, dtype=np.float) mse_resid = np.zeros(endog.shape, dtype=np.float) det_cov_params = np.zeros(endog.shape, dtype=np.float) cv_iter = LeaveOneOut(self.nobs) for inidx, outidx in cv_iter: res_i = self.model_class(endog[inidx], exog[inidx]).fit() params[outidx] = res_i.params mse_resid[outidx] = res_i.mse_resid det_cov_params[outidx] = get_det_cov_params(res_i) return dict(params=params, mse_resid=mse_resid, det_cov_params=det_cov_params) def summary_frame(self): """ Creates a DataFrame with all available influence results. Returns ------- frame : DataFrame A DataFrame with all results. Notes ----- The resultant DataFrame contains six variables in addition to the DFBETAS. 
These are: * cooks_d : Cook's Distance defined in `Influence.cooks_distance` * standard_resid : Standardized residuals defined in `Influence.resid_studentized_internal` * hat_diag : The diagonal of the projection, or hat, matrix defined in `Influence.hat_matrix_diag` * dffits_internal : DFFITS statistics using internally Studentized residuals defined in `Influence.dffits_internal` * dffits : DFFITS statistics using externally Studentized residuals defined in `Influence.dffits` * student_resid : Externally Studentized residuals defined in `Influence.resid_studentized_external` """ from pandas import DataFrame # row and column labels data = self.results.model.data row_labels = data.row_labels beta_labels = ['dfb_' + i for i in data.xnames] # grab the results summary_data = DataFrame(dict( cooks_d = self.cooks_distance[0], standard_resid = self.resid_studentized_internal, hat_diag = self.hat_matrix_diag, dffits_internal = self.dffits_internal[0], student_resid = self.resid_studentized_external, dffits = self.dffits[0], ), index = row_labels) #NOTE: if we don't give columns, order of above will be arbitrary dfbeta = DataFrame(self.dfbetas, columns=beta_labels, index=row_labels) return dfbeta.join(summary_data) def summary_table(self, float_fmt="%6.3f"): '''create a summary table with all influence and outlier measures This does currently not distinguish between statistics that can be calculated from the original regression results and for which a leave-one-observation-out loop is needed Returns ------- res : SimpleTable instance SimpleTable instance with the results, can be printed Notes ----- This also attaches table_data to the instance. ''' #print self.dfbetas # table_raw = [ np.arange(self.nobs), # self.endog, # self.fittedvalues, # self.cooks_distance(), # self.resid_studentized_internal, # self.hat_matrix_diag, # self.dffits_internal, # self.resid_studentized_external, # self.dffits, # self.dfbetas # ] table_raw = [ ('obs', np.arange(self.nobs)), ('endog', self.endog), ('fitted\nvalue', self.results.fittedvalues), ("Cook's\nd", self.cooks_distance[0]), ("student.\nresidual", self.resid_studentized_internal), ('hat diag', self.hat_matrix_diag), ('dffits \ninternal', self.dffits_internal[0]), ("ext.stud.\nresidual", self.resid_studentized_external), ('dffits', self.dffits[0]) ] colnames, data = lzip(*table_raw) #unzip data = np.column_stack(data) self.table_data = data from statsmodels.iolib.table import SimpleTable, default_html_fmt from statsmodels.iolib.tableformatting import fmt_base from copy import deepcopy fmt = deepcopy(fmt_base) fmt_html = deepcopy(default_html_fmt) fmt['data_fmts'] = ["%4d"] + [float_fmt] * (data.shape[1] - 1) #fmt_html['data_fmts'] = fmt['data_fmts'] return SimpleTable(data, headers=colnames, txt_fmt=fmt, html_fmt=fmt_html) def summary_table(res, alpha=0.05): '''generate summary table of outlier and influence similar to SAS Parameters ---------- alpha : float significance level for confidence interval Returns ------- st : SimpleTable instance table with results that can be printed data : ndarray calculated measures and statistics for the table ss2 : list of strings column_names for table (Note: rows of table are observations) ''' from scipy import stats from statsmodels.sandbox.regression.predstd import wls_prediction_std infl = OLSInfluence(res) #standard error for predicted mean #Note: using hat_matrix only works for fitted values predict_mean_se = np.sqrt(infl.hat_matrix_diag*res.mse_resid) tppf = stats.t.isf(alpha/2., res.df_resid) predict_mean_ci = 
np.column_stack([ res.fittedvalues - tppf * predict_mean_se, res.fittedvalues + tppf * predict_mean_se]) #standard error for predicted observation predict_se, predict_ci_low, predict_ci_upp = wls_prediction_std(res) predict_ci = np.column_stack((predict_ci_low, predict_ci_upp)) #standard deviation of residual resid_se = np.sqrt(res.mse_resid * (1 - infl.hat_matrix_diag)) table_sm = np.column_stack([ np.arange(res.nobs) + 1, res.model.endog, res.fittedvalues, predict_mean_se, predict_mean_ci[:,0], predict_mean_ci[:,1], predict_ci[:,0], predict_ci[:,1], res.resid, resid_se, infl.resid_studentized_internal, infl.cooks_distance[0] ]) #colnames, data = lzip(*table_raw) #unzip data = table_sm ss2 = ['Obs', 'Dep Var\nPopulation', 'Predicted\nValue', 'Std Error\nMean Predict', 'Mean ci\n95% low', 'Mean ci\n95% upp', 'Predict ci\n95% low', 'Predict ci\n95% upp', 'Residual', 'Std Error\nResidual', 'Student\nResidual', "Cook's\nD"] colnames = ss2 #self.table_data = data #data = np.column_stack(data) from statsmodels.iolib.table import SimpleTable, default_html_fmt from statsmodels.iolib.tableformatting import fmt_base from copy import deepcopy fmt = deepcopy(fmt_base) fmt_html = deepcopy(default_html_fmt) fmt['data_fmts'] = ["%4d"] + ["%6.3f"] * (data.shape[1] - 1) #fmt_html['data_fmts'] = fmt['data_fmts'] st = SimpleTable(data, headers=colnames, txt_fmt=fmt, html_fmt=fmt_html) return st, data, ss2
bsd-3-clause
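The variance_inflation_factor docstring above defines VIF as 1 / (1 - R^2) from regressing one column of the design matrix on all the other columns. A small usage sketch with synthetic, nearly collinear data; the data and the 0.9 correlation are illustrative only.

import numpy as np
from statsmodels.stats.outliers_influence import variance_inflation_factor

rng = np.random.RandomState(0)
x1 = rng.standard_normal(100)
x2 = 0.9 * x1 + 0.1 * rng.standard_normal(100)   # nearly collinear with x1
exog = np.column_stack([np.ones(100), x1, x2])    # include a constant column

# One VIF per explanatory variable (the constant is usually skipped).
vifs = [variance_inflation_factor(exog, i) for i in range(1, exog.shape[1])]
print(vifs)   # the two collinear columns both get large VIFs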
jreback/pandas
pandas/tests/test_optional_dependency.py
3
1518
import sys
import types

import pytest

from pandas.compat._optional import VERSIONS, import_optional_dependency

import pandas._testing as tm


def test_import_optional():
    match = "Missing .*notapackage.* pip .* conda .* notapackage"
    with pytest.raises(ImportError, match=match):
        import_optional_dependency("notapackage")

    result = import_optional_dependency("notapackage", raise_on_missing=False)
    assert result is None


def test_xlrd_version_fallback():
    pytest.importorskip("xlrd")
    import_optional_dependency("xlrd")


def test_bad_version(monkeypatch):
    name = "fakemodule"
    module = types.ModuleType(name)
    module.__version__ = "0.9.0"
    sys.modules[name] = module
    monkeypatch.setitem(VERSIONS, name, "1.0.0")

    match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"
    with pytest.raises(ImportError, match=match):
        import_optional_dependency("fakemodule")

    with tm.assert_produces_warning(UserWarning):
        result = import_optional_dependency("fakemodule", on_version="warn")
    assert result is None

    module.__version__ = "1.0.0"  # exact match is OK
    result = import_optional_dependency("fakemodule")
    assert result is module


def test_no_version_raises(monkeypatch):
    name = "fakemodule"
    module = types.ModuleType(name)
    sys.modules[name] = module
    monkeypatch.setitem(VERSIONS, name, "1.0.0")

    with pytest.raises(ImportError, match="Can't determine .* fakemodule"):
        import_optional_dependency(name)
bsd-3-clause
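The tests above exercise pandas' private import_optional_dependency helper with the raise_on_missing and on_version arguments. A short usage sketch based only on the calls that appear in those tests; it assumes the same private pandas API that the test file itself imports.

from pandas.compat._optional import import_optional_dependency

# Missing package with default arguments: raises an ImportError whose message
# mentions pip/conda (the regex in test_import_optional matches that message).
# import_optional_dependency("notapackage")

# Missing package with raise_on_missing=False: returns None instead of raising.
maybe_pkg = import_optional_dependency("notapackage", raise_on_missing=False)
assert maybe_pkg is None

# Installed but too-old package with on_version="warn": emits a UserWarning
# and returns None instead of raising (see test_bad_version above).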
saketkc/statsmodels
statsmodels/sandbox/examples/ex_mixed_lls_re.py
34
5393
# -*- coding: utf-8 -*- """Example using OneWayMixed Created on Sat Dec 03 10:15:55 2011 Author: Josef Perktold This example constructs a linear model with individual specific random effects, and uses OneWayMixed to estimate it. This is a variation on ex_mixed_lls_0.py. Here we only have a single individual specific constant, that is just a random effect without exogenous regressors. """ import numpy as np from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit examples = ['ex1'] if 'ex1' in examples: #np.random.seed(54321) np.random.seed(978326) nsubj = 2000 units = [] nobs_i = 4 #number of observations per unit, changed below nx = 0 #number fixed effects nz = 1 ##number random effects beta = np.ones(nx) gamma = 0.5 * np.ones(nz) #mean of random effect gamma[0] = 0 gamma_re_true = [] for i in range(nsubj): #create data for one unit #random effect/coefficient gamma_re = gamma + 0.2 * np.random.standard_normal(nz) #store true parameter for checking gamma_re_true.append(gamma_re) #for testing unbalanced case, let's change nobs per unit if i > nsubj//4: nobs_i = 6 #generate exogenous variables X = np.random.standard_normal((nobs_i, nx)) Z = np.random.standard_normal((nobs_i, nz-1)) Z = np.column_stack((np.ones(nobs_i), Z)) noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1 #generate endogenous variable Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise #add random effect design matrix also to fixed effects to #capture the mean #this seems to be necessary to force mean of RE to zero !? #(It's not required for estimation but interpretation of random #effects covariance matrix changes - still need to check details. X = np.hstack((X,Z)) #create units and append to list unit = Unit(Y, X, Z) units.append(unit) m = OneWayMixed(units) import time t0 = time.time() m.initialize() res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6) t1 = time.time() print('time for initialize and fit', t1-t0) print('number of iterations', m.iterations) #print dir(m) #print vars(m) print('\nestimates for fixed effects') print(m.a) print(m.params) bfixed_cov = m.cov_fixed() print('beta fixed standard errors') print(np.sqrt(np.diag(bfixed_cov))) print(m.bse) b_re = m.params_random_units print('RE mean:', b_re.mean(0)) print('RE columns std', b_re.std(0)) print('np.cov(b_re, rowvar=0), sample statistic') print(np.cov(b_re, rowvar=0)) print('std of above') #need atleast_1d or diag raises exception print(np.sqrt(np.diag(np.atleast_1d(np.cov(b_re, rowvar=0))))) print('m.cov_random()') print(m.cov_random()) print('std of above') print(res.std_random()) print(np.sqrt(np.diag(m.cov_random()))) print('\n(non)convergence of llf') print(m.history['llf'][-4:]) print('convergence of parameters') #print np.diff(np.vstack(m.history[-4:])[:,1:],axis=0) print(np.diff(np.vstack(m.history['params'][-4:]),axis=0)) print('convergence of D') print(np.diff(np.array(m.history['D'][-4:]), axis=0)) #zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units]) zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units]) '''if Z is not included in X: >>> np.dot(b_re.T, b_re)/100 array([[ 0.03270611, -0.00916051], [-0.00916051, 0.26432783]]) >>> m.cov_random() array([[ 0.0348722 , -0.00909159], [-0.00909159, 0.26846254]]) >>> #note cov_random doesn't subtract mean! 
''' print('\nchecking the random effects distribution and prediction') gamma_re_true = np.array(gamma_re_true) print('mean of random effect true', gamma_re_true.mean(0)) print('mean from fixed effects ', m.params[-2:]) print('mean of estimated RE ', b_re.mean(0)) print() absmean_true = np.abs(gamma_re_true).mean(0) mape = ((m.params[-2:] + b_re) / gamma_re_true - 1).mean(0)*100 mean_abs_perc = np.abs((m.params[-2:] + b_re) - gamma_re_true).mean(0) \ / absmean_true*100 median_abs_perc = np.median(np.abs((m.params[-2:] + b_re) - gamma_re_true), 0) \ / absmean_true*100 rmse_perc = ((m.params[-2:] + b_re) - gamma_re_true).std(0) \ / absmean_true*100 print('mape ', mape) print('mean_abs_perc ', mean_abs_perc) print('median_abs_perc', median_abs_perc) print('rmse_perc (std)', rmse_perc) from numpy.testing import assert_almost_equal #assert is for n_units=100 in original example #I changed random number generation, so this won't work anymore #assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8) #now returns res print('llf', res.llf) #based on MLE, does not include constant print('tvalues', res.tvalues) print('pvalues', res.pvalues) print(res.t_test([1])) print('test mean of both random effects variables is zero') print(res.f_test([[1]])) plots = res.plot_random_univariate(bins=50) #fig = res.plot_scatter_pairs(0, 1) #no pairs import matplotlib.pyplot as plt plt.show()
bsd-3-clause
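The script above simulates a random-intercept model: each unit i draws its own constant gamma_i = gamma + 0.2 * N(0, 1), and the response is Y = X beta + Z gamma_i + 0.1 * noise. A compact restatement of that data-generating step for a single unit, using the same constants as the script (nz=1, a single random intercept); the function name is illustrative.

import numpy as np

def simulate_unit(nobs_i, nx, nz, beta, gamma, rng):
    # Unit-specific random coefficient centered on the population mean gamma.
    gamma_re = gamma + 0.2 * rng.standard_normal(nz)
    X = rng.standard_normal((nobs_i, nx))
    Z = np.column_stack((np.ones(nobs_i), rng.standard_normal((nobs_i, nz - 1))))
    noise = 0.1 * rng.standard_normal(nobs_i)
    Y = X.dot(beta) + Z.dot(gamma_re) + noise
    return Y, X, Z, gamma_re

rng = np.random.RandomState(978326)
Y, X, Z, gamma_re = simulate_unit(nobs_i=4, nx=0, nz=1, beta=np.ones(0),
                                  gamma=np.zeros(1), rng=rng)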
JsNoNo/scikit-learn
sklearn/neighbors/nearest_centroid.py
199
7249
# -*- coding: utf-8 -*- """ Nearest Centroid Classification """ # Author: Robert Layton <robertlayton@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # # License: BSD 3 clause import warnings import numpy as np from scipy import sparse as sp from ..base import BaseEstimator, ClassifierMixin from ..metrics.pairwise import pairwise_distances from ..preprocessing import LabelEncoder from ..utils.validation import check_array, check_X_y, check_is_fitted from ..utils.sparsefuncs import csc_median_axis_0 class NearestCentroid(BaseEstimator, ClassifierMixin): """Nearest centroid classifier. Each class is represented by its centroid, with test samples classified to the class with the nearest centroid. Read more in the :ref:`User Guide <nearest_centroid_classifier>`. Parameters ---------- metric: string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. The centroids for the samples corresponding to each class is the point from which the sum of the distances (according to the metric) of all samples that belong to that particular class are minimized. If the "manhattan" metric is provided, this centroid is the median and for all other metrics, the centroid is now set to be the mean. shrink_threshold : float, optional (default = None) Threshold for shrinking centroids to remove features. Attributes ---------- centroids_ : array-like, shape = [n_classes, n_features] Centroid of each class Examples -------- >>> from sklearn.neighbors.nearest_centroid import NearestCentroid >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = NearestCentroid() >>> clf.fit(X, y) NearestCentroid(metric='euclidean', shrink_threshold=None) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier Notes ----- When used for text classification with tf-idf vectors, this classifier is also known as the Rocchio classifier. References ---------- Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of multiple cancer types by shrunken centroids of gene expression. Proceedings of the National Academy of Sciences of the United States of America, 99(10), 6567-6572. The National Academy of Sciences. """ def __init__(self, metric='euclidean', shrink_threshold=None): self.metric = metric self.shrink_threshold = shrink_threshold def fit(self, X, y): """ Fit the NearestCentroid model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. Note that centroid shrinking cannot be used with sparse matrices. y : array, shape = [n_samples] Target values (integers) """ # If X is sparse and the metric is "manhattan", store it in a csc # format is easier to calculate the median. 
if self.metric == 'manhattan': X, y = check_X_y(X, y, ['csc']) else: X, y = check_X_y(X, y, ['csr', 'csc']) is_X_sparse = sp.issparse(X) if is_X_sparse and self.shrink_threshold: raise ValueError("threshold shrinking not supported" " for sparse input") n_samples, n_features = X.shape le = LabelEncoder() y_ind = le.fit_transform(y) self.classes_ = classes = le.classes_ n_classes = classes.size if n_classes < 2: raise ValueError('y has less than 2 classes') # Mask mapping each class to it's members. self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64) # Number of clusters in each class. nk = np.zeros(n_classes) for cur_class in range(n_classes): center_mask = y_ind == cur_class nk[cur_class] = np.sum(center_mask) if is_X_sparse: center_mask = np.where(center_mask)[0] # XXX: Update other averaging methods according to the metrics. if self.metric == "manhattan": # NumPy does not calculate median of sparse matrices. if not is_X_sparse: self.centroids_[cur_class] = np.median(X[center_mask], axis=0) else: self.centroids_[cur_class] = csc_median_axis_0(X[center_mask]) else: if self.metric != 'euclidean': warnings.warn("Averaging for metrics other than " "euclidean and manhattan not supported. " "The average is set to be the mean." ) self.centroids_[cur_class] = X[center_mask].mean(axis=0) if self.shrink_threshold: dataset_centroid_ = np.mean(X, axis=0) # m parameter for determining deviation m = np.sqrt((1. / nk) + (1. / n_samples)) # Calculate deviation using the standard deviation of centroids. variance = (X - self.centroids_[y_ind]) ** 2 variance = variance.sum(axis=0) s = np.sqrt(variance / (n_samples - n_classes)) s += np.median(s) # To deter outliers from affecting the results. mm = m.reshape(len(m), 1) # Reshape to allow broadcasting. ms = mm * s deviation = ((self.centroids_ - dataset_centroid_) / ms) # Soft thresholding: if the deviation crosses 0 during shrinking, # it becomes zero. signs = np.sign(deviation) deviation = (np.abs(deviation) - self.shrink_threshold) deviation[deviation < 0] = 0 deviation *= signs # Now adjust the centroids using the deviation msd = ms * deviation self.centroids_ = dataset_centroid_[np.newaxis, :] + msd return self def predict(self, X): """Perform classification on an array of test vectors X. The predicted class C for each sample in X is returned. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Notes ----- If the metric constructor parameter is "precomputed", X is assumed to be the distance matrix between the data to be predicted and ``self.centroids_``. """ check_is_fitted(self, 'centroids_') X = check_array(X, accept_sparse='csr') return self.classes_[pairwise_distances( X, self.centroids_, metric=self.metric).argmin(axis=1)]
bsd-3-clause
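The shrink_threshold branch in fit above soft-thresholds each centroid's standardized deviation from the overall data mean, zeroing out features whose deviation falls below the threshold. A short sketch comparing centroids fitted with and without shrinking on synthetic blobs; the dataset and the threshold value are illustrative only.

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.neighbors import NearestCentroid

X, y = make_blobs(n_samples=200, centers=3, n_features=10, random_state=0)

plain = NearestCentroid().fit(X, y)
shrunk = NearestCentroid(shrink_threshold=0.5).fit(X, y)

# Shrinking pulls class centroids toward the dataset mean and can zero out
# uninformative features, acting as an implicit form of feature selection.
print(np.abs(plain.centroids_ - shrunk.centroids_).max())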
ryfeus/lambda-packs
pytorch/source/numpy/lib/npyio.py
3
84661
from __future__ import division, absolute_import, print_function import sys import os import re import functools import itertools import warnings import weakref from operator import itemgetter, index as opindex import numpy as np from . import format from ._datasource import DataSource from numpy.core import overrides from numpy.core.multiarray import packbits, unpackbits from numpy.core.overrides import set_module from numpy.core._internal import recursive from ._iotools import ( LineSplitter, NameValidator, StringConverter, ConverterError, ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields, flatten_dtype, easy_dtype, _decode_line ) from numpy.compat import ( asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode, os_fspath, os_PathLike ) from numpy.core.numeric import pickle if sys.version_info[0] >= 3: from collections.abc import Mapping else: from future_builtins import map from collections import Mapping @set_module('numpy') def loads(*args, **kwargs): # NumPy 1.15.0, 2017-12-10 warnings.warn( "np.loads is deprecated, use pickle.loads instead", DeprecationWarning, stacklevel=2) return pickle.loads(*args, **kwargs) __all__ = [ 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' ] array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') class BagObj(object): """ BagObj(obj) Convert attribute look-ups to getitems on the object passed in. Parameters ---------- obj : class instance Object on which attribute look-up is performed. Examples -------- >>> from numpy.lib.npyio import BagObj as BO >>> class BagDemo(object): ... def __getitem__(self, key): # An instance of BagObj(BagDemo) ... # will call this method when any ... # attribute look-up is required ... result = "Doesn't matter what you want, " ... return result + "you're gonna get this" ... >>> demo_obj = BagDemo() >>> bagobj = BO(demo_obj) >>> bagobj.hello_there "Doesn't matter what you want, you're gonna get this" >>> bagobj.I_can_be_anything "Doesn't matter what you want, you're gonna get this" """ def __init__(self, obj): # Use weakref to make NpzFile objects collectable by refcount self._obj = weakref.proxy(obj) def __getattribute__(self, key): try: return object.__getattribute__(self, '_obj')[key] except KeyError: raise AttributeError(key) def __dir__(self): """ Enables dir(bagobj) to list the files in an NpzFile. This also enables tab-completion in an interpreter or IPython. """ return list(object.__getattribute__(self, '_obj').keys()) def zipfile_factory(file, *args, **kwargs): """ Create a ZipFile. Allows for Zip64, and the `file` argument can accept file, str, or pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile constructor. """ if not hasattr(file, 'read'): file = os_fspath(file) import zipfile kwargs['allowZip64'] = True return zipfile.ZipFile(file, *args, **kwargs) class NpzFile(Mapping): """ NpzFile(fid) A dictionary-like object with lazy-loading of files in the zipped archive provided on construction. `NpzFile` is used to load files in the NumPy ``.npz`` data archive format. It assumes that files in the archive have a ``.npy`` extension, other files are ignored. The arrays and file strings are lazily loaded on either getitem access using ``obj['key']`` or attribute lookup using ``obj.f.key``. 
A list of all files (without ``.npy`` extensions) can be obtained with ``obj.files`` and the ZipFile object itself using ``obj.zip``. Attributes ---------- files : list of str List of all files in the archive with a ``.npy`` extension. zip : ZipFile instance The ZipFile object initialized with the zipped archive. f : BagObj instance An object on which attribute can be performed as an alternative to getitem access on the `NpzFile` instance itself. allow_pickle : bool, optional Allow loading pickled data. Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass on to pickle.load. These are only useful when loading object arrays saved on Python 2 when using Python 3. Parameters ---------- fid : file or str The zipped archive to open. This is either a file-like object or a string containing the path to the archive. own_fid : bool, optional Whether NpzFile should close the file handle. Requires that `fid` is a file-like object. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npz = np.load(outfile) >>> isinstance(npz, np.lib.io.NpzFile) True >>> npz.files ['y', 'x'] >>> npz['x'] # getitem access array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> npz.f.x # attribute lookup array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ def __init__(self, fid, own_fid=False, allow_pickle=True, pickle_kwargs=None): # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. _zip = zipfile_factory(fid) self._files = _zip.namelist() self.files = [] self.allow_pickle = allow_pickle self.pickle_kwargs = pickle_kwargs for x in self._files: if x.endswith('.npy'): self.files.append(x[:-4]) else: self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: self.fid = fid else: self.fid = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): """ Close the file. """ if self.zip is not None: self.zip.close() self.zip = None if self.fid is not None: self.fid.close() self.fid = None self.f = None # break reference cycle def __del__(self): self.close() # Implement the Mapping ABC def __iter__(self): return iter(self.files) def __len__(self): return len(self.files) def __getitem__(self, key): # FIXME: This seems like it will copy strings around # more than is strictly necessary. The zipfile # will read the string and then # the format.read_array will copy the string # to another place in memory. # It would be better if the zipfile could read # (or at least uncompress) the data # directly into the array memory. member = False if key in self._files: member = True elif key in self.files: member = True key += '.npy' if member: bytes = self.zip.open(key) magic = bytes.read(len(format.MAGIC_PREFIX)) bytes.close() if magic == format.MAGIC_PREFIX: bytes = self.zip.open(key) return format.read_array(bytes, allow_pickle=self.allow_pickle, pickle_kwargs=self.pickle_kwargs) else: return self.zip.read(key) else: raise KeyError("%s is not a file in the archive" % key) if sys.version_info.major == 3: # deprecate the python 2 dict apis that we supported by accident in # python 3. We forgot to implement itervalues() at all in earlier # versions of numpy, so no need to deprecated it here. def iteritems(self): # Numpy 1.15, 2018-02-20 warnings.warn( "NpzFile.iteritems is deprecated in python 3, to match the " "removal of dict.itertems. 
Use .items() instead.", DeprecationWarning, stacklevel=2) return self.items() def iterkeys(self): # Numpy 1.15, 2018-02-20 warnings.warn( "NpzFile.iterkeys is deprecated in python 3, to match the " "removal of dict.iterkeys. Use .keys() instead.", DeprecationWarning, stacklevel=2) return self.keys() @set_module('numpy') def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII'): """ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. Parameters ---------- file : file-like object, string, or pathlib.Path The file to read. File-like objects must support the ``seek()`` and ``read()`` methods. Pickled files require that the file-like object support the ``readline()`` method as well. mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional If not None, then memory-map the file, using the given mode (see `numpy.memmap` for a detailed description of the modes). A memory-mapped array is kept on disk. However, it can be accessed and sliced like any ndarray. Memory mapping is especially useful for accessing small fragments of large files without reading the entire file into memory. allow_pickle : bool, optional Allow loading pickled object arrays stored in npy files. Reasons for disallowing pickles include security, as loading pickled data can execute arbitrary code. If pickles are disallowed, loading object arrays will fail. Default: True fix_imports : bool, optional Only useful when loading Python 2 generated pickled files on Python 3, which includes npy/npz files containing object arrays. If `fix_imports` is True, pickle will try to map the old Python 2 names to the new names used in Python 3. encoding : str, optional What encoding to use when reading Python 2 strings. Only useful when loading Python 2 generated pickled files in Python 3, which includes npy/npz files containing object arrays. Values other than 'latin1', 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical data. Default: 'ASCII' Returns ------- result : array, tuple, dict, etc. Data stored in the file. For ``.npz`` files, the returned instance of NpzFile class must be closed to avoid leaking file descriptors. Raises ------ IOError If the input file does not exist or cannot be read. ValueError The file contains an object array, but allow_pickle=False given. See Also -------- save, savez, savez_compressed, loadtxt memmap : Create a memory-map to an array stored in a file on disk. lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. Notes ----- - If the file contains pickle data, then whatever object is stored in the pickle is returned. - If the file is a ``.npy`` file, then a single array is returned. - If the file is a ``.npz`` file, then a dictionary-like object is returned, containing ``{filename: array}`` key-value pairs, one for each file in the archive. - If the file is a ``.npz`` file, the returned value supports the context manager protocol in a similar fashion to the open function:: with load('foo.npz') as data: a = data['a'] The underlying file descriptor is closed when exiting the 'with' block. 
Examples -------- Store data to disk, and load it again: >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) >>> np.load('/tmp/123.npy') array([[1, 2, 3], [4, 5, 6]]) Store compressed data to disk, and load it again: >>> a=np.array([[1, 2, 3], [4, 5, 6]]) >>> b=np.array([1, 2]) >>> np.savez('/tmp/123.npz', a=a, b=b) >>> data = np.load('/tmp/123.npz') >>> data['a'] array([[1, 2, 3], [4, 5, 6]]) >>> data['b'] array([1, 2]) >>> data.close() Mem-map the stored array, and then access the second row directly from disk: >>> X = np.load('/tmp/123.npy', mmap_mode='r') >>> X[1, :] memmap([4, 5, 6]) """ if encoding not in ('ASCII', 'latin1', 'bytes'): # The 'encoding' value for pickle also affects what encoding # the serialized binary data of NumPy arrays is loaded # in. Pickle does not pass on the encoding information to # NumPy. The unpickling code in numpy.core.multiarray is # written to assume that unicode data appearing where binary # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. # # Other encoding values can corrupt binary data, and we # purposefully disallow them. For the same reason, the errors= # argument is not exposed, as values other than 'strict' # result can similarly silently corrupt numerical data. raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") if sys.version_info[0] >= 3: pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) else: # Nothing to do on Python 2 pickle_kwargs = {} # TODO: Use contextlib.ExitStack once we drop Python 2 if hasattr(file, 'read'): fid = file own_fid = False else: fid = open(os_fspath(file), "rb") own_fid = True try: # Code to distinguish from NumPy binary files and pickles. _ZIP_PREFIX = b'PK\x03\x04' _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this N = len(format.MAGIC_PREFIX) magic = fid.read(N) # If the file size is less than N, we need to make sure not # to seek past the beginning of the file fid.seek(-min(N, len(magic)), 1) # back-up if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): # zip-file (assume .npz) # Transfer file ownership to NpzFile ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) own_fid = False return ret elif magic == format.MAGIC_PREFIX: # .npy file if mmap_mode: return format.open_memmap(file, mode=mmap_mode) else: return format.read_array(fid, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) else: # Try a pickle if not allow_pickle: raise ValueError("Cannot load file containing pickled data " "when allow_pickle=False") try: return pickle.load(fid, **pickle_kwargs) except Exception: raise IOError( "Failed to interpret file %s as a pickle" % repr(file)) finally: if own_fid: fid.close() def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): return (arr,) @array_function_dispatch(_save_dispatcher) def save(file, arr, allow_pickle=True, fix_imports=True): """ Save an array to a binary file in NumPy ``.npy`` format. Parameters ---------- file : file, str, or pathlib.Path File or filename to which the data is saved. If file is a file-object, then the filename is unchanged. If file is a string or Path, a ``.npy`` extension will be appended to the file name if it does not already have one. arr : array_like Array data to be saved. allow_pickle : bool, optional Allow saving object arrays using Python pickles. 
Reasons for disallowing pickles include security (loading pickled data can execute arbitrary code) and portability (pickled objects may not be loadable on different Python installations, for example if the stored objects require libraries that are not available, and not all pickled data is compatible between Python 2 and Python 3). Default: True fix_imports : bool, optional Only useful in forcing objects in object arrays on Python 3 to be pickled in a Python 2 compatible way. If `fix_imports` is True, pickle will try to map the new Python 3 names to the old module names used in Python 2, so that the pickle data stream is readable with Python 2. See Also -------- savez : Save several arrays into a ``.npz`` archive savetxt, load Notes ----- For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> np.save(outfile, x) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ own_fid = False if hasattr(file, 'read'): fid = file else: file = os_fspath(file) if not file.endswith('.npy'): file = file + '.npy' fid = open(file, "wb") own_fid = True if sys.version_info[0] >= 3: pickle_kwargs = dict(fix_imports=fix_imports) else: # Nothing to do on Python 2 pickle_kwargs = None try: arr = np.asanyarray(arr) format.write_array(fid, arr, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) finally: if own_fid: fid.close() def _savez_dispatcher(file, *args, **kwds): for a in args: yield a for v in kwds.values(): yield v @array_function_dispatch(_savez_dispatcher) def savez(file, *args, **kwds): """ Save several arrays into a single file in uncompressed ``.npz`` format. If arguments are passed in with no keywords, the corresponding variable names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword arguments are given, the corresponding variable names, in the ``.npz`` file will match the keyword names. Parameters ---------- file : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string or a Path, the ``.npz`` extension will be appended to the file name if it is not already there. args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to know the names of the arrays outside `savez`, the arrays will be saved with names "arr_0", "arr_1", and so on. These arguments can be any expression. kwds : Keyword arguments, optional Arrays to save to the file. Arrays will be saved in the file with the keyword names. Returns ------- None See Also -------- save : Save a single array to a binary file in NumPy format. savetxt : Save an array to a file as plain text. savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is not compressed and each file in the archive contains one variable in ``.npy`` format. For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for its list of arrays (with the ``.files`` attribute), and for the arrays themselves. 
Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) Using `savez` with \\*args, the arrays are saved with default names. >>> np.savez(outfile, x, y) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> npzfile = np.load(outfile) >>> npzfile.files ['arr_1', 'arr_0'] >>> npzfile['arr_0'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) Using `savez` with \\**kwds, the arrays are saved with the keyword names. >>> outfile = TemporaryFile() >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npzfile = np.load(outfile) >>> npzfile.files ['y', 'x'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ _savez(file, args, kwds, False) def _savez_compressed_dispatcher(file, *args, **kwds): for a in args: yield a for v in kwds.values(): yield v @array_function_dispatch(_savez_compressed_dispatcher) def savez_compressed(file, *args, **kwds): """ Save several arrays into a single file in compressed ``.npz`` format. If keyword arguments are given, then filenames are taken from the keywords. If arguments are passed in with no keywords, then stored file names are arr_0, arr_1, etc. Parameters ---------- file : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string or a Path, the ``.npz`` extension will be appended to the file name if it is not already there. args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to know the names of the arrays outside `savez`, the arrays will be saved with names "arr_0", "arr_1", and so on. These arguments can be any expression. kwds : Keyword arguments, optional Arrays to save to the file. Arrays will be saved in the file with the keyword names. Returns ------- None See Also -------- numpy.save : Save a single array to a binary file in NumPy format. numpy.savetxt : Save an array to a file as plain text. numpy.savez : Save several arrays into an uncompressed ``.npz`` file format numpy.load : Load the files created by savez_compressed. Notes ----- The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is compressed with ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable in ``.npy`` format. For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for its list of arrays (with the ``.files`` attribute), and for the arrays themselves. Examples -------- >>> test_array = np.random.rand(3, 2) >>> test_vector = np.random.rand(4) >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) >>> loaded = np.load('/tmp/123.npz') >>> print(np.array_equal(test_array, loaded['a'])) True >>> print(np.array_equal(test_vector, loaded['b'])) True """ _savez(file, args, kwds, True) def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): # Import is postponed to here since zipfile depends on gzip, an optional # component of the so-called standard library. 
import zipfile if not hasattr(file, 'read'): file = os_fspath(file) if not file.endswith('.npz'): file = file + '.npz' namedict = kwds for i, val in enumerate(args): key = 'arr_%d' % i if key in namedict.keys(): raise ValueError( "Cannot use un-named variables and keyword %s" % key) namedict[key] = val if compress: compression = zipfile.ZIP_DEFLATED else: compression = zipfile.ZIP_STORED zipf = zipfile_factory(file, mode="w", compression=compression) if sys.version_info >= (3, 6): # Since Python 3.6 it is possible to write directly to a ZIP file. for key, val in namedict.items(): fname = key + '.npy' val = np.asanyarray(val) force_zip64 = val.nbytes >= 2**30 with zipf.open(fname, 'w', force_zip64=force_zip64) as fid: format.write_array(fid, val, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) else: # Stage arrays in a temporary file on disk, before writing to zip. # Import deferred for startup time improvement import tempfile # Since target file might be big enough to exceed capacity of a global # temporary directory, create temp file side-by-side with the target file. file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp') fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy') os.close(fd) try: for key, val in namedict.items(): fname = key + '.npy' fid = open(tmpfile, 'wb') try: format.write_array(fid, np.asanyarray(val), allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) fid.close() fid = None zipf.write(tmpfile, arcname=fname) except IOError as exc: raise IOError("Failed to write to %s: %s" % (tmpfile, exc)) finally: if fid: fid.close() finally: os.remove(tmpfile) zipf.close() def _getconv(dtype): """ Find the correct dtype converter. Adapted from matplotlib """ def floatconv(x): x.lower() if '0x' in x: return float.fromhex(x) return float(x) typ = dtype.type if issubclass(typ, np.bool_): return lambda x: bool(int(x)) if issubclass(typ, np.uint64): return np.uint64 if issubclass(typ, np.int64): return np.int64 if issubclass(typ, np.integer): return lambda x: int(float(x)) elif issubclass(typ, np.longdouble): return np.longdouble elif issubclass(typ, np.floating): return floatconv elif issubclass(typ, complex): return lambda x: complex(asstr(x).replace('+-', '-')) elif issubclass(typ, np.bytes_): return asbytes elif issubclass(typ, np.unicode_): return asunicode else: return asstr # amount of lines loadtxt reads in one chunk, can be overridden for testing _loadtxt_chunksize = 50000 @set_module('numpy') def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0, encoding='bytes', max_rows=None): """ Load data from a text file. Each row in the text file must have the same number of values. Parameters ---------- fname : file, str, or pathlib.Path File, filename, or generator to read. If the filename extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note that generators should return byte strings for Python 3k. dtype : data-type, optional Data-type of the resulting array; default: float. If this is a structured data-type, the resulting array will be 1-dimensional, and each row will be interpreted as an element of the array. In this case, the number of columns used must match the number of fields in the data-type. comments : str or sequence of str, optional The characters or list of characters used to indicate the start of a comment. None implies no comments. For backwards compatibility, byte strings will be decoded as 'latin1'. 
The default is '#'. delimiter : str, optional The string used to separate values. For backwards compatibility, byte strings will be decoded as 'latin1'. The default is whitespace. converters : dict, optional A dictionary mapping column number to a function that will parse the column string into the desired value. E.g., if column 0 is a date string: ``converters = {0: datestr2num}``. Converters can also be used to provide a default value for missing data (but see also `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional Skip the first `skiprows` lines; default: 0. usecols : int or sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. .. versionchanged:: 1.11.0 When a single column has to be read it is possible to use an integer instead of a tuple. E.g ``usecols = 3`` reads the fourth column the same way as ``usecols = (3,)`` would. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)``. When used with a structured data-type, arrays are returned for each field. Default is False. ndmin : int, optional The returned array will have at least `ndmin` dimensions. Otherwise mono-dimensional axes will be squeezed. Legal values: 0 (default), 1 or 2. .. versionadded:: 1.6.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply to input streams. The special value 'bytes' enables backward compatibility workarounds that ensures you receive byte arrays as results if possible and passes 'latin1' encoded strings to converters. Override this value to receive unicode arrays and pass strings as input to converters. If set to None the system default is used. The default value is 'bytes'. .. versionadded:: 1.14.0 max_rows : int, optional Read `max_rows` lines of content after `skiprows` lines. The default is to read all the lines. .. versionadded:: 1.16.0 Returns ------- out : ndarray Data read from the text file. See Also -------- load, fromstring, fromregex genfromtxt : Load data with missing values handled as specified. scipy.io.loadmat : reads MATLAB data files Notes ----- This function aims to be a fast reader for simply formatted files. The `genfromtxt` function provides more sophisticated handling of, e.g., lines with missing values. .. versionadded:: 1.10.0 The strings produced by the Python float.hex method can be used as input for floats. Examples -------- >>> from io import StringIO # StringIO behaves like a file object >>> c = StringIO(u"0 1\\n2 3") >>> np.loadtxt(c) array([[ 0., 1.], [ 2., 3.]]) >>> d = StringIO(u"M 21 72\\nF 35 58") >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), ... 
'formats': ('S1', 'i4', 'f4')}) array([('M', 21, 72.0), ('F', 35, 58.0)], dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')]) >>> c = StringIO(u"1,0,2\\n3,0,4") >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) >>> x array([ 1., 3.]) >>> y array([ 2., 4.]) """ # Type conversions for Py3 convenience if comments is not None: if isinstance(comments, (basestring, bytes)): comments = [comments] comments = [_decode_line(x) for x in comments] # Compile regex for comments beforehand comments = (re.escape(comment) for comment in comments) regex_comments = re.compile('|'.join(comments)) if delimiter is not None: delimiter = _decode_line(delimiter) user_converters = converters if encoding == 'bytes': encoding = None byte_converters = True else: byte_converters = False if usecols is not None: # Allow usecols to be a single int or a sequence of ints try: usecols_as_list = list(usecols) except TypeError: usecols_as_list = [usecols] for col_idx in usecols_as_list: try: opindex(col_idx) except TypeError as e: e.args = ( "usecols must be an int or a sequence of ints but " "it contains at least one element of type %s" % type(col_idx), ) raise # Fall back to existing code usecols = usecols_as_list fown = False try: if isinstance(fname, os_PathLike): fname = os_fspath(fname) if _is_string_like(fname): fh = np.lib._datasource.open(fname, 'rt', encoding=encoding) fencoding = getattr(fh, 'encoding', 'latin1') fh = iter(fh) fown = True else: fh = iter(fname) fencoding = getattr(fname, 'encoding', 'latin1') except TypeError: raise ValueError('fname must be a string, file handle, or generator') # input may be a python2 io stream if encoding is not None: fencoding = encoding # we must assume local encoding # TODO emit portability warning? elif fencoding is None: import locale fencoding = locale.getpreferredencoding() # not to be confused with the flatten_dtype we import... @recursive def flatten_dtype_internal(self, dt): """Unpack a structured data-type, and produce re-packing info.""" if dt.names is None: # If the dtype is flattened, return. # If the dtype has a shape, the dtype occurs # in the list more than once. shape = dt.shape if len(shape) == 0: return ([dt.base], None) else: packing = [(shape[-1], list)] if len(shape) > 1: for dim in dt.shape[-2::-1]: packing = [(dim*packing[0][0], packing*dim)] return ([dt.base] * int(np.prod(dt.shape)), packing) else: types = [] packing = [] for field in dt.names: tp, bytes = dt.fields[field] flat_dt, flat_packing = self(tp) types.extend(flat_dt) # Avoid extra nesting for subarrays if tp.ndim > 0: packing.extend(flat_packing) else: packing.append((len(flat_dt), flat_packing)) return (types, packing) @recursive def pack_items(self, items, packing): """Pack items into nested lists based on re-packing info.""" if packing is None: return items[0] elif packing is tuple: return tuple(items) elif packing is list: return list(items) else: start = 0 ret = [] for length, subpacking in packing: ret.append(self(items[start:start+length], subpacking)) start += length return tuple(ret) def split_line(line): """Chop off comments, strip, and split at delimiter. """ line = _decode_line(line, encoding=encoding) if comments is not None: line = regex_comments.split(line, maxsplit=1)[0] line = line.strip('\r\n') if line: return line.split(delimiter) else: return [] def read_data(chunk_size): """Parse each line, including the first. The file read, `fh`, is a global defined above. 
Parameters ---------- chunk_size : int At most `chunk_size` lines are read at a time, with iteration until all lines are read. """ X = [] line_iter = itertools.chain([first_line], fh) line_iter = itertools.islice(line_iter, max_rows) for i, line in enumerate(line_iter): vals = split_line(line) if len(vals) == 0: continue if usecols: vals = [vals[j] for j in usecols] if len(vals) != N: line_num = i + skiprows + 1 raise ValueError("Wrong number of columns at line %d" % line_num) # Convert each value according to its column and store items = [conv(val) for (conv, val) in zip(converters, vals)] # Then pack it according to the dtype's nesting items = pack_items(items, packing) X.append(items) if len(X) > chunk_size: yield X X = [] if X: yield X try: # Make sure we're dealing with a proper dtype dtype = np.dtype(dtype) defconv = _getconv(dtype) # Skip the first `skiprows` lines for i in range(skiprows): next(fh) # Read until we find a line with some values, and use # it to estimate the number of columns, N. first_vals = None try: while not first_vals: first_line = next(fh) first_vals = split_line(first_line) except StopIteration: # End of lines reached first_line = '' first_vals = [] warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2) N = len(usecols or first_vals) dtype_types, packing = flatten_dtype_internal(dtype) if len(dtype_types) > 1: # We're dealing with a structured array, each field of # the dtype matches a column converters = [_getconv(dt) for dt in dtype_types] else: # All fields have the same dtype converters = [defconv for i in range(N)] if N > 1: packing = [(N, tuple)] # By preference, use the converters specified by the user for i, conv in (user_converters or {}).items(): if usecols: try: i = usecols.index(i) except ValueError: # Unused converter specified continue if byte_converters: # converters may use decode to workaround numpy's old behaviour, # so encode the string again before passing to the user converter def tobytes_first(x, conv): if type(x) is bytes: return conv(x) return conv(x.encode("latin1")) import functools converters[i] = functools.partial(tobytes_first, conv=conv) else: converters[i] = conv converters = [conv if conv is not bytes else lambda x: x.encode(fencoding) for conv in converters] # read data in chunks and fill it into an array via resize # over-allocating and shrinking the array later may be faster but is # probably not relevant compared to the cost of actually reading and # converting the data X = None for x in read_data(_loadtxt_chunksize): if X is None: X = np.array(x, dtype) else: nshape = list(X.shape) pos = nshape[0] nshape[0] += len(x) X.resize(nshape, refcheck=False) X[pos:, ...] = x finally: if fown: fh.close() if X is None: X = np.array([], dtype) # Multicolumn data are returned with shape (1, N, M), i.e. # (1, 1, M) for a single row - remove the singleton dimension there if X.ndim == 3 and X.shape[:2] == (1, 1): X.shape = (1, -1) # Verify that the array has at least dimensions `ndmin`. 
# Check correctness of the values of `ndmin` if ndmin not in [0, 1, 2]: raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) # Tweak the size and shape of the arrays - remove extraneous dimensions if X.ndim > ndmin: X = np.squeeze(X) # and ensure we have the minimum number of dimensions asked for # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 if X.ndim < ndmin: if ndmin == 1: X = np.atleast_1d(X) elif ndmin == 2: X = np.atleast_2d(X).T if unpack: if len(dtype_types) > 1: # For structured arrays, return an array for each field. return [X[field] for field in dtype.names] else: return X.T else: return X def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None, header=None, footer=None, comments=None, encoding=None): return (X,) @array_function_dispatch(_savetxt_dispatcher) def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# ', encoding=None): """ Save an array to a text file. Parameters ---------- fname : filename or file handle If the filename ends in ``.gz``, the file is automatically saved in compressed gzip format. `loadtxt` understands gzipped files transparently. X : 1D or 2D array_like Data to be saved to a text file. fmt : str or sequence of strs, optional A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which case `delimiter` is ignored. For complex `X`, the legal options for `fmt` are: * a single specifier, `fmt='%.4e'`, resulting in numbers formatted like `' (%s+%sj)' % (fmt, fmt)` * a full string specifying every real and imaginary part, e.g. `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns * a list of specifiers, one per column - in this case, the real and imaginary part must have separate specifiers, e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns delimiter : str, optional String or character separating columns. newline : str, optional String or character separating lines. .. versionadded:: 1.5.0 header : str, optional String that will be written at the beginning of the file. .. versionadded:: 1.7.0 footer : str, optional String that will be written at the end of the file. .. versionadded:: 1.7.0 comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``numpy.loadtxt``. .. versionadded:: 1.7.0 encoding : {None, str}, optional Encoding used to encode the outputfile. Does not apply to output streams. If the encoding is something other than 'bytes' or 'latin1' you will not be able to load the file in NumPy versions < 1.14. Default is 'latin1'. .. versionadded:: 1.14.0 See Also -------- save : Save an array to a binary file in NumPy ``.npy`` format savez : Save several arrays into an uncompressed ``.npz`` archive savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- Further explanation of the `fmt` parameter (``%[flag]width[.precision]specifier``): flags: ``-`` : left justify ``+`` : Forces to precede result with + or -. ``0`` : Left pad the number with zeros instead of space (see width). width: Minimum number of characters to be printed. The value is not truncated if it has more characters. precision: - For integer specifiers (eg. ``d,i,o,x``), the minimum number of digits. - For ``e, E`` and ``f`` specifiers, the number of digits to print after the decimal point. - For ``g`` and ``G``, the maximum number of significant digits. - For ``s``, the maximum number of characters. 
specifiers: ``c`` : character ``d`` or ``i`` : signed decimal integer ``e`` or ``E`` : scientific notation with ``e`` or ``E``. ``f`` : decimal floating point ``g,G`` : use the shorter of ``e,E`` or ``f`` ``o`` : signed octal ``s`` : string of characters ``u`` : unsigned decimal integer ``x,X`` : unsigned hexadecimal integer This explanation of ``fmt`` is not complete, for an exhaustive specification see [1]_. References ---------- .. [1] `Format Specification Mini-Language <https://docs.python.org/library/string.html#format-specification-mini-language>`_, Python Documentation. Examples -------- >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation """ # Py3 conversions first if isinstance(fmt, bytes): fmt = asstr(fmt) delimiter = asstr(delimiter) class WriteWrap(object): """Convert to unicode in py2 or to bytes on bytestream inputs. """ def __init__(self, fh, encoding): self.fh = fh self.encoding = encoding self.do_write = self.first_write def close(self): self.fh.close() def write(self, v): self.do_write(v) def write_bytes(self, v): if isinstance(v, bytes): self.fh.write(v) else: self.fh.write(v.encode(self.encoding)) def write_normal(self, v): self.fh.write(asunicode(v)) def first_write(self, v): try: self.write_normal(v) self.write = self.write_normal except TypeError: # input is probably a bytestream self.write_bytes(v) self.write = self.write_bytes own_fh = False if isinstance(fname, os_PathLike): fname = os_fspath(fname) if _is_string_like(fname): # datasource doesn't support creating a new file ... open(fname, 'wt').close() fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) own_fh = True # need to convert str to unicode for text io output if sys.version_info[0] == 2: fh = WriteWrap(fh, encoding or 'latin1') elif hasattr(fname, 'write'): # wrap to handle byte output streams fh = WriteWrap(fname, encoding or 'latin1') else: raise ValueError('fname must be a string or file handle') try: X = np.asarray(X) # Handle 1-dimensional arrays if X.ndim == 0 or X.ndim > 2: raise ValueError( "Expected 1D or 2D array, got %dD array instead" % X.ndim) elif X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: X = np.atleast_2d(X).T ncol = 1 # Complex dtype -- each field indicates a separate column else: ncol = len(X.dtype.descr) else: ncol = X.shape[1] iscomplex_X = np.iscomplexobj(X) # `fmt` can be a string with multiple insertion points or a # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') if type(fmt) in (list, tuple): if len(fmt) != ncol: raise AttributeError('fmt has wrong shape. 
%s' % str(fmt)) format = asstr(delimiter).join(map(asstr, fmt)) elif isinstance(fmt, str): n_fmt_chars = fmt.count('%') error = ValueError('fmt has wrong number of %% formats: %s' % fmt) if n_fmt_chars == 1: if iscomplex_X: fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol else: fmt = [fmt, ] * ncol format = delimiter.join(fmt) elif iscomplex_X and n_fmt_chars != (2 * ncol): raise error elif ((not iscomplex_X) and n_fmt_chars != ncol): raise error else: format = fmt else: raise ValueError('invalid fmt: %r' % (fmt,)) if len(header) > 0: header = header.replace('\n', '\n' + comments) fh.write(comments + header + newline) if iscomplex_X: for row in X: row2 = [] for number in row: row2.append(number.real) row2.append(number.imag) s = format % tuple(row2) + newline fh.write(s.replace('+-', '-')) else: for row in X: try: v = format % tuple(row) + newline except TypeError: raise TypeError("Mismatch between array dtype ('%s') and " "format specifier ('%s')" % (str(X.dtype), format)) fh.write(v) if len(footer) > 0: footer = footer.replace('\n', '\n' + comments) fh.write(comments + footer + newline) finally: if own_fh: fh.close() @set_module('numpy') def fromregex(file, regexp, dtype, encoding=None): """ Construct an array from a text file, using regular expression parsing. The returned array is always a structured array, and is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields of the structured array. Parameters ---------- file : str or file File name or file object to read. regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. dtype : dtype or list of dtypes Dtype for the structured array. encoding : str, optional Encoding used to decode the inputfile. Does not apply to input streams. .. versionadded:: 1.14.0 Returns ------- output : ndarray The output array, containing the part of the content of `file` that was matched by `regexp`. `output` is always a structured array. Raises ------ TypeError When `dtype` is not a valid dtype for a structured array. See Also -------- fromstring, loadtxt Notes ----- Dtypes for structured arrays can be specified in several forms, but all forms specify at least the data type and field name. For details see `doc.structured_arrays`. Examples -------- >>> f = open('test.dat', 'w') >>> f.write("1312 foo\\n1534 bar\\n444 qux") >>> f.close() >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] >>> output = np.fromregex('test.dat', regexp, ... [('num', np.int64), ('key', 'S3')]) >>> output array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], dtype=[('num', '<i8'), ('key', '|S3')]) >>> output['num'] array([1312, 1534, 444], dtype=int64) """ own_fh = False if not hasattr(file, "read"): file = np.lib._datasource.open(file, 'rt', encoding=encoding) own_fh = True try: if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) content = file.read() if isinstance(content, bytes) and isinstance(regexp, np.unicode): regexp = asbytes(regexp) elif isinstance(content, np.unicode) and isinstance(regexp, bytes): regexp = asstr(regexp) if not hasattr(regexp, 'match'): regexp = re.compile(regexp) seq = regexp.findall(content) if seq and not isinstance(seq[0], tuple): # Only one group is in the regexp. # Create the new array as a single data-type and then # re-interpret as a single-field structured array. 
newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) output.dtype = dtype else: output = np.array(seq, dtype=dtype) return output finally: if own_fh: file.close() #####-------------------------------------------------------------------------- #---- --- ASCII functions --- #####-------------------------------------------------------------------------- @set_module('numpy') def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skip_header=0, skip_footer=0, converters=None, missing_values=None, filling_values=None, usecols=None, names=None, excludelist=None, deletechars=None, replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, invalid_raise=True, max_rows=None, encoding='bytes'): """ Load data from a text file, with missing values handled as specified. Each line past the first `skip_header` lines is split at the `delimiter` character, and characters following the `comments` character are discarded. Parameters ---------- fname : file, str, pathlib.Path, list of str, generator File, filename, list, or generator to read. If the filename extension is `.gz` or `.bz2`, the file is first decompressed. Note that generators must return byte strings in Python 3k. The strings in a list or produced by a generator are treated as lines. dtype : dtype, optional Data type of the resulting array. If None, the dtypes will be determined by the contents of each column, individually. comments : str, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded delimiter : str, int, or sequence, optional The string used to separate values. By default, any consecutive whitespaces act as delimiter. An integer or sequence of integers can also be provided as width(s) of each field. skiprows : int, optional `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. skip_header : int, optional The number of lines to skip at the beginning of the file. skip_footer : int, optional The number of lines to skip at the end of the file. converters : variable, optional The set of functions that convert the data of a column to a value. The converters can also be used to provide a default value for missing data: ``converters = {3: lambda s: float(s or 0)}``. missing : variable, optional `missing` was removed in numpy 1.10. Please use `missing_values` instead. missing_values : variable, optional The set of strings corresponding to missing data. filling_values : variable, optional The set of values to be used as default when the data are missing. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional If `names` is True, the field names are read from the first line after the first `skip_header` lines. This line can optionally be proceeded by a comment delimiter. If `names` is a sequence or a single-string of comma-separated names, the names will be used to define the field names in a structured dtype. If `names` is None, the names of the dtype fields will be used, if any. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended an underscore: for example, `file` would become `file_`. deletechars : str, optional A string combining invalid characters that must be deleted from the names. 
defaultfmt : str, optional A format used to define default field names, such as "f%i" or "f_%02i". autostrip : bool, optional Whether to automatically strip white spaces from the variables. replace_space : char, optional Character(s) used in replacement of white spaces in the variables names. By default, use a '_'. case_sensitive : {True, False, 'upper', 'lower'}, optional If True, field names are case sensitive. If False or 'upper', field names are converted to upper case. If 'lower', field names are converted to lower case. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)`` usemask : bool, optional If True, return a masked array. If False, return a regular array. loose : bool, optional If True, do not raise errors for invalid values. invalid_raise : bool, optional If True, an exception is raised if an inconsistency is detected in the number of columns. If False, a warning is emitted and the offending lines are skipped. max_rows : int, optional The maximum number of rows to read. Must not be used with skip_footer at the same time. If given, the value must be at least 1. Default is to read the entire file. .. versionadded:: 1.10.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply when `fname` is a file object. The special value 'bytes' enables backward compatibility workarounds that ensure that you receive byte arrays when possible and passes latin1 encoded strings to converters. Override this value to receive unicode arrays and pass strings as input to converters. If set to None the system default is used. The default value is 'bytes'. .. versionadded:: 1.14.0 Returns ------- out : ndarray Data read from the text file. If `usemask` is True, this is a masked array. See Also -------- numpy.loadtxt : equivalent function when no data is missing. Notes ----- * When spaces are used as delimiters, or when no delimiter has been given as input, there should not be any missing data between two fields. * When the variables are named (either by a flexible dtype or with `names`, there must not be any header in the file (else a ValueError exception is raised). * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. References ---------- .. [1] NumPy User Guide, section `I/O with NumPy <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_. Examples --------- >>> from io import StringIO >>> import numpy as np Comma delimited file with mixed dtype >>> s = StringIO(u"1,1.3,abcde") >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), ... ('mystring','S5')], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Using dtype = None >>> s.seek(0) # needed for StringIO example only >>> data = np.genfromtxt(s, dtype=None, ... names = ['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Specifying dtype and names >>> s.seek(0) >>> data = np.genfromtxt(s, dtype="i8,f8,S5", ... names=['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) An example with fixed-width columns >>> s = StringIO(u"11.3abcde") >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], ... 
delimiter=[1,3,5]) >>> data array((1, 1.3, 'abcde'), dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')]) """ if max_rows is not None: if skip_footer: raise ValueError( "The keywords 'skip_footer' and 'max_rows' can not be " "specified at the same time.") if max_rows < 1: raise ValueError("'max_rows' must be at least 1.") if usemask: from numpy.ma import MaskedArray, make_mask_descr # Check the input dictionary of converters user_converters = converters or {} if not isinstance(user_converters, dict): raise TypeError( "The input argument 'converter' should be a valid dictionary " "(got '%s' instead)" % type(user_converters)) if encoding == 'bytes': encoding = None byte_converters = True else: byte_converters = False # Initialize the filehandle, the LineSplitter and the NameValidator own_fhd = False try: if isinstance(fname, os_PathLike): fname = os_fspath(fname) if isinstance(fname, basestring): fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding)) own_fhd = True else: fhd = iter(fname) except TypeError: raise TypeError( "fname must be a string, filehandle, list of strings, " "or generator. Got %s instead." % type(fname)) split_line = LineSplitter(delimiter=delimiter, comments=comments, autostrip=autostrip, encoding=encoding) validate_names = NameValidator(excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Skip the first `skip_header` rows for i in range(skip_header): next(fhd) # Keep on until we find the first valid values first_values = None try: while not first_values: first_line = _decode_line(next(fhd), encoding) if (names is True) and (comments is not None): if comments in first_line: first_line = ( ''.join(first_line.split(comments)[1:])) first_values = split_line(first_line) except StopIteration: # return an empty array if the datafile is empty first_line = '' first_values = [] warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2) # Should we take the first values as names ? 
if names is True: fval = first_values[0].strip() if comments is not None: if fval in comments: del first_values[0] # Check the columns to use: make sure `usecols` is a list if usecols is not None: try: usecols = [_.strip() for _ in usecols.split(",")] except AttributeError: try: usecols = list(usecols) except TypeError: usecols = [usecols, ] nbcols = len(usecols or first_values) # Check the names and overwrite the dtype.names if needed if names is True: names = validate_names([str(_.strip()) for _ in first_values]) first_line = '' elif _is_string_like(names): names = validate_names([_.strip() for _ in names.split(',')]) elif names: names = validate_names(names) # Get the dtype if dtype is not None: dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Make sure the names is a list (for 2.5) if names is not None: names = list(names) if usecols: for (i, current) in enumerate(usecols): # if usecols is a list of names, convert to a list of indices if _is_string_like(current): usecols[i] = names.index(current) elif current < 0: usecols[i] = current + len(first_values) # If the dtype is not None, make sure we update it if (dtype is not None) and (len(dtype) > nbcols): descr = dtype.descr dtype = np.dtype([descr[_] for _ in usecols]) names = list(dtype.names) # If `names` is not None, update the names elif (names is not None) and (len(names) > nbcols): names = [names[_] for _ in usecols] elif (names is not None) and (dtype is not None): names = list(dtype.names) # Process the missing values ............................... # Rename missing_values for convenience user_missing_values = missing_values or () if isinstance(user_missing_values, bytes): user_missing_values = user_missing_values.decode('latin1') # Define the list of missing_values (one column: one list) missing_values = [list(['']) for _ in range(nbcols)] # We have a dictionary: process it field by field if isinstance(user_missing_values, dict): # Loop on the items for (key, val) in user_missing_values.items(): # Is the key a string ? if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped continue # Redefine the key as needed if it's a column number if usecols: try: key = usecols.index(key) except ValueError: pass # Transform the value as a list of string if isinstance(val, (list, tuple)): val = [str(_) for _ in val] else: val = [str(val), ] # Add the value(s) to the current list of missing if key is None: # None acts as default for miss in missing_values: miss.extend(val) else: missing_values[key].extend(val) # We have a sequence : each item matches a column elif isinstance(user_missing_values, (list, tuple)): for (value, entry) in zip(user_missing_values, missing_values): value = str(value) if value not in entry: entry.append(value) # We have a string : apply it to all entries elif isinstance(user_missing_values, basestring): user_value = user_missing_values.split(",") for entry in missing_values: entry.extend(user_value) # We have something else: apply it to all entries else: for entry in missing_values: entry.extend([str(user_missing_values)]) # Process the filling_values ............................... 
# Rename the input for convenience user_filling_values = filling_values if user_filling_values is None: user_filling_values = [] # Define the default filling_values = [None] * nbcols # We have a dictionary : update each entry individually if isinstance(user_filling_values, dict): for (key, val) in user_filling_values.items(): if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped, continue # Redefine the key if it's a column number and usecols is defined if usecols: try: key = usecols.index(key) except ValueError: pass # Add the value to the list filling_values[key] = val # We have a sequence : update on a one-to-one basis elif isinstance(user_filling_values, (list, tuple)): n = len(user_filling_values) if (n <= nbcols): filling_values[:n] = user_filling_values else: filling_values = user_filling_values[:nbcols] # We have something else : use it for all entries else: filling_values = [user_filling_values] * nbcols # Initialize the converters ................................ if dtype is None: # Note: we can't use a [...]*nbcols, as we would have 3 times the same # ... converter, instead of 3 different converters. converters = [StringConverter(None, missing_values=miss, default=fill) for (miss, fill) in zip(missing_values, filling_values)] else: dtype_flat = flatten_dtype(dtype, flatten_base=True) # Initialize the converters if len(dtype_flat) > 1: # Flexible type : get a converter from each dtype zipit = zip(dtype_flat, missing_values, filling_values) converters = [StringConverter(dt, locked=True, missing_values=miss, default=fill) for (dt, miss, fill) in zipit] else: # Set to a default converter (but w/ different missing values) zipit = zip(missing_values, filling_values) converters = [StringConverter(dtype, locked=True, missing_values=miss, default=fill) for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] for (j, conv) in user_converters.items(): # If the converter is specified by column names, use the index instead if _is_string_like(j): try: j = names.index(j) i = j except ValueError: continue elif usecols: try: i = usecols.index(j) except ValueError: # Unused converter specified continue else: i = j # Find the value to test - first_line is not filtered by usecols: if len(first_line): testing_value = first_values[j] else: testing_value = None if conv is bytes: user_conv = asbytes elif byte_converters: # converters may use decode to workaround numpy's old behaviour, # so encode the string again before passing to the user converter def tobytes_first(x, conv): if type(x) is bytes: return conv(x) return conv(x.encode("latin1")) import functools user_conv = functools.partial(tobytes_first, conv=conv) else: user_conv = conv converters[i].update(user_conv, locked=True, testing_value=testing_value, default=filling_values[i], missing_values=missing_values[i],) uc_update.append((i, user_conv)) # Make sure we have the corrected keys in user_converters... user_converters.update(uc_update) # Fixme: possible error as following variable never used. # miss_chars = [_.missing_values for _ in converters] # Initialize the output lists ... # ... rows rows = [] append_to_rows = rows.append # ... masks if usemask: masks = [] append_to_masks = masks.append # ... 
invalid invalid = [] append_to_invalid = invalid.append # Parse each line for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): values = split_line(line) nbvalues = len(values) # Skip an empty line if nbvalues == 0: continue if usecols: # Select only the columns we need try: values = [values[_] for _ in usecols] except IndexError: append_to_invalid((i + skip_header + 1, nbvalues)) continue elif nbvalues != nbcols: append_to_invalid((i + skip_header + 1, nbvalues)) continue # Store the values append_to_rows(tuple(values)) if usemask: append_to_masks(tuple([v.strip() in m for (v, m) in zip(values, missing_values)])) if len(rows) == max_rows: break if own_fhd: fhd.close() # Upgrade the converters (if needed) if dtype is None: for (i, converter) in enumerate(converters): current_column = [itemgetter(i)(_m) for _m in rows] try: converter.iterupgrade(current_column) except ConverterLockError: errmsg = "Converter #%i is locked and cannot be upgraded: " % i current_column = map(itemgetter(i), rows) for (j, value) in enumerate(current_column): try: converter.upgrade(value) except (ConverterError, ValueError): errmsg += "(occurred line #%i for value '%s')" errmsg %= (j + 1 + skip_header, value) raise ConverterError(errmsg) # Check that we don't have invalid values nbinvalid = len(invalid) if nbinvalid > 0: nbrows = len(rows) + nbinvalid - skip_footer # Construct the error message template = " Line #%%i (got %%i columns instead of %i)" % nbcols if skip_footer > 0: nbinvalid_skipped = len([_ for _ in invalid if _[0] > nbrows + skip_header]) invalid = invalid[:nbinvalid - nbinvalid_skipped] skip_footer -= nbinvalid_skipped # # nbrows -= skip_footer # errmsg = [template % (i, nb) # for (i, nb) in invalid if i < nbrows] # else: errmsg = [template % (i, nb) for (i, nb) in invalid] if len(errmsg): errmsg.insert(0, "Some errors were detected !") errmsg = "\n".join(errmsg) # Raise an exception ? if invalid_raise: raise ValueError(errmsg) # Issue a warning ? else: warnings.warn(errmsg, ConversionWarning, stacklevel=2) # Strip the last skip_footer data if skip_footer > 0: rows = rows[:-skip_footer] if usemask: masks = masks[:-skip_footer] # Convert each value according to the converter: # We want to modify the list in place to avoid creating a new one... if loose: rows = list( zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) else: rows = list( zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) # Reset the dtype data = rows if dtype is None: # Get the dtypes from the types of the converters column_types = [conv.type for conv in converters] # Find the columns with strings... strcolidx = [i for (i, v) in enumerate(column_types) if v == np.unicode_] if byte_converters and strcolidx: # convert strings back to bytes for backward compatibility warnings.warn( "Reading unicode strings without specifying the encoding " "argument is deprecated. 
Set the encoding, use None for the " "system default.", np.VisibleDeprecationWarning, stacklevel=2) def encode_unicode_cols(row_tup): row = list(row_tup) for i in strcolidx: row[i] = row[i].encode('latin1') return tuple(row) try: data = [encode_unicode_cols(r) for r in data] except UnicodeEncodeError: pass else: for i in strcolidx: column_types[i] = np.bytes_ # Update string types to be the right length sized_column_types = column_types[:] for i, col_type in enumerate(column_types): if np.issubdtype(col_type, np.character): n_chars = max(len(row[i]) for row in data) sized_column_types[i] = (col_type, n_chars) if names is None: # If the dtype is uniform (before sizing strings) base = { c_type for c, c_type in zip(converters, column_types) if c._checked} if len(base) == 1: uniform_type, = base (ddtype, mdtype) = (uniform_type, bool) else: ddtype = [(defaultfmt % i, dt) for (i, dt) in enumerate(sized_column_types)] if usemask: mdtype = [(defaultfmt % i, bool) for (i, dt) in enumerate(sized_column_types)] else: ddtype = list(zip(names, sized_column_types)) mdtype = list(zip(names, [bool] * len(sized_column_types))) output = np.array(data, dtype=ddtype) if usemask: outputmask = np.array(masks, dtype=mdtype) else: # Overwrite the initial dtype names if needed if names and dtype.names: dtype.names = names # Case 1. We have a structured type if len(dtype_flat) > 1: # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] # First, create the array using a flattened dtype: # [('a', int), ('b1', int), ('b2', float)] # Then, view the array using the specified dtype. if 'O' in (_.char for _ in dtype_flat): if has_nested_fields(dtype): raise NotImplementedError( "Nested fields involving objects are not supported...") else: output = np.array(data, dtype=dtype) else: rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) output = rows.view(dtype) # Now, process the rowmasks the same way if usemask: rowmasks = np.array( masks, dtype=np.dtype([('', bool) for t in dtype_flat])) # Construct the new dtype mdtype = make_mask_descr(dtype) outputmask = rowmasks.view(mdtype) # Case #2. We have a basic dtype else: # We used some user-defined converters if user_converters: ishomogeneous = True descr = [] for i, ttype in enumerate([conv.type for conv in converters]): # Keep the dtype of the current converter if i in user_converters: ishomogeneous &= (ttype == dtype.type) if np.issubdtype(ttype, np.character): ttype = (ttype, max(len(row[i]) for row in data)) descr.append(('', ttype)) else: descr.append(('', dtype)) # So we changed the dtype ? if not ishomogeneous: # We have more than one field if len(descr) > 1: dtype = np.dtype(descr) # We have only one field: drop the name if not needed. else: dtype = np.dtype(ttype) # output = np.array(data, dtype) if usemask: if dtype.names: mdtype = [(_, bool) for _ in dtype.names] else: mdtype = bool outputmask = np.array(masks, dtype=mdtype) # Try to take care of the missing data we missed names = output.dtype.names if usemask and names: for (name, conv) in zip(names, converters): missing_values = [conv(_) for _ in conv.missing_values if _ != ''] for mval in missing_values: outputmask[name] |= (output[name] == mval) # Construct the final array if usemask: output = output.view(MaskedArray) output._mask = outputmask if unpack: return output.squeeze().T return output.squeeze() def ndfromtxt(fname, **kwargs): """ Load ASCII data stored in a file and return it as a single array. 
Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function. """ kwargs['usemask'] = False return genfromtxt(fname, **kwargs) def mafromtxt(fname, **kwargs): """ Load ASCII data stored in a text file and return a masked array. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. """ kwargs['usemask'] = True return genfromtxt(fname, **kwargs) def recfromtxt(fname, **kwargs): """ Load ASCII data from a file and return it in a record array. If ``usemask=False`` a standard `recarray` is returned, if ``usemask=True`` a MaskedRecords array is returned. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ kwargs.setdefault("dtype", None) usemask = kwargs.get('usemask', False) output = genfromtxt(fname, **kwargs) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output def recfromcsv(fname, **kwargs): """ Load ASCII data stored in a comma-separated file. The returned array is a record array (if ``usemask=False``, see `recarray`) or a masked record array (if ``usemask=True``, see `ma.mrecords.MaskedRecords`). Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ # Set default kwargs for genfromtxt as relevant to csv import. kwargs.setdefault("case_sensitive", "lower") kwargs.setdefault("names", True) kwargs.setdefault("delimiter", ",") kwargs.setdefault("dtype", None) output = genfromtxt(fname, **kwargs) usemask = kwargs.get("usemask", False) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output
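# --- Hedged usage sketch (not part of the original numpy module) ---
# A minimal round trip through the I/O helpers defined above: np.save/np.load
# for binary ``.npy`` data, np.savez for ``.npz`` archives, and
# np.savetxt/np.loadtxt/np.genfromtxt for text. The file names used here
# ('demo.npy', 'demo.npz', 'demo.csv') are illustrative assumptions only.
if __name__ == "__main__":
    import numpy as np

    a = np.arange(6).reshape(2, 3)

    # Binary single-array round trip (.npy).
    np.save("demo.npy", a)
    assert np.array_equal(np.load("demo.npy"), a)

    # Several named arrays in one uncompressed archive (.npz).
    np.savez("demo.npz", first=a, second=a * 2)
    npz = np.load("demo.npz")
    assert sorted(npz.files) == ["first", "second"]
    npz.close()

    # Text round trip; genfromtxt additionally handles missing values.
    np.savetxt("demo.csv", a, fmt="%d", delimiter=",")
    b = np.loadtxt("demo.csv", delimiter=",")
    c = np.genfromtxt("demo.csv", delimiter=",", filling_values=0)
    assert np.array_equal(b, c)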
mit
jseabold/scikit-learn
sklearn/utils/setup.py
296
2884
import os from os.path import join from sklearn._build_utils import get_blas_info def configuration(parent_package='', top_path=None): import numpy from numpy.distutils.misc_util import Configuration config = Configuration('utils', parent_package, top_path) config.add_subpackage('sparsetools') cblas_libs, blas_info = get_blas_info() cblas_compile_args = blas_info.pop('extra_compile_args', []) cblas_includes = [join('..', 'src', 'cblas'), numpy.get_include(), blas_info.pop('include_dirs', [])] libraries = [] if os.name == 'posix': libraries.append('m') cblas_libs.append('m') config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'], libraries=libraries) config.add_extension('arrayfuncs', sources=['arrayfuncs.c'], depends=[join('src', 'cholesky_delete.h')], libraries=cblas_libs, include_dirs=cblas_includes, extra_compile_args=cblas_compile_args, **blas_info ) config.add_extension( 'murmurhash', sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')], include_dirs=['src']) config.add_extension('lgamma', sources=['lgamma.c', join('src', 'gamma.c')], include_dirs=['src'], libraries=libraries) config.add_extension('graph_shortest_path', sources=['graph_shortest_path.c'], include_dirs=[numpy.get_include()]) config.add_extension('fast_dict', sources=['fast_dict.cpp'], language="c++", include_dirs=[numpy.get_include()], libraries=libraries) config.add_extension('seq_dataset', sources=['seq_dataset.c'], include_dirs=[numpy.get_include()]) config.add_extension('weight_vector', sources=['weight_vector.c'], include_dirs=cblas_includes, libraries=cblas_libs, **blas_info) config.add_extension("_random", sources=["_random.c"], include_dirs=[numpy.get_include()], libraries=libraries) config.add_extension("_logistic_sigmoid", sources=["_logistic_sigmoid.c"], include_dirs=[numpy.get_include()], libraries=libraries) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
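# --- Hedged illustration (not part of the original scikit-learn file) ---
# numpy.distutils reaches this module through the configuration() entry point
# defined above: a parent setup.py registers the subpackage with
# add_subpackage(), and the build machinery then imports this file and calls
# configuration(parent_package, top_path) to collect its extensions. The
# helper below sketches that parent-side convention under an assumed name and
# is not used by the actual build.
def _parent_configuration_sketch(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    parent = Configuration('sklearn', parent_package, top_path)
    # This call is what makes numpy.distutils descend into
    # sklearn/utils/setup.py and invoke the configuration() defined above.
    parent.add_subpackage('utils')
    return parent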
bsd-3-clause
studio666/gnuradio
gr-filter/examples/fir_filter_ccc.py
47
4019
#!/usr/bin/env python # # Copyright 2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, filter from gnuradio import analog from gnuradio import blocks from gnuradio import eng_notation from gnuradio.eng_option import eng_option from optparse import OptionParser import sys try: import scipy except ImportError: print "Error: could not import scipy (http://www.scipy.org/)" sys.exit(1) try: import pylab except ImportError: print "Error: could not import pylab (http://matplotlib.sourceforge.net/)" sys.exit(1) class example_fir_filter_ccc(gr.top_block): def __init__(self, N, fs, bw, tw, atten, D): gr.top_block.__init__(self) self._nsamps = N self._fs = fs self._bw = bw self._tw = tw self._at = atten self._decim = D taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at) print "Num. Taps: ", len(taps) self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1) self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps) self.filt0 = filter.fir_filter_ccc(self._decim, taps) self.vsnk_src = blocks.vector_sink_c() self.vsnk_out = blocks.vector_sink_c() self.connect(self.src, self.head, self.vsnk_src) self.connect(self.head, self.filt0, self.vsnk_out) def main(): parser = OptionParser(option_class=eng_option, conflict_handler="resolve") parser.add_option("-N", "--nsamples", type="int", default=10000, help="Number of samples to process [default=%default]") parser.add_option("-s", "--samplerate", type="eng_float", default=8000, help="System sample rate [default=%default]") parser.add_option("-B", "--bandwidth", type="eng_float", default=1000, help="Filter bandwidth [default=%default]") parser.add_option("-T", "--transition", type="eng_float", default=100, help="Transition band [default=%default]") parser.add_option("-A", "--attenuation", type="eng_float", default=80, help="Stopband attenuation [default=%default]") parser.add_option("-D", "--decimation", type="int", default=1, help="Decmation factor [default=%default]") (options, args) = parser.parse_args () put = example_fir_filter_ccc(options.nsamples, options.samplerate, options.bandwidth, options.transition, options.attenuation, options.decimation) put.run() data_src = scipy.array(put.vsnk_src.data()) data_snk = scipy.array(put.vsnk_out.data()) # Plot the signals PSDs nfft = 1024 f1 = pylab.figure(1, figsize=(12,10)) s1 = f1.add_subplot(1,1,1) s1.psd(data_src, NFFT=nfft, noverlap=nfft/4, Fs=options.samplerate) s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4, Fs=options.samplerate) f2 = pylab.figure(2, figsize=(12,10)) s2 = f2.add_subplot(1,1,1) s2.plot(data_src) s2.plot(data_snk.real, 'g') pylab.show() if __name__ == "__main__": try: main() except KeyboardInterrupt: pass
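# --- Hedged add-on sketch (not part of the original GNU Radio example) ---
# An offline check of the taps designed by filter.firdes.low_pass_2 above:
# zero-pad the taps, FFT them, and plot the magnitude response with pylab
# (already imported by this example). The helper name and its parameters are
# illustrative assumptions; nothing in the flowgraph calls it.
def plot_taps_response(taps, fs, nfft=1024):
    import numpy
    # Make sure the FFT length covers the full tap vector.
    nfft = max(nfft, len(taps))
    h = numpy.zeros(nfft)
    h[:len(taps)] = numpy.asarray(taps)
    H = numpy.fft.fft(h)
    freqs = numpy.linspace(0, fs, nfft, endpoint=False)
    # Plot only the positive-frequency half of the response, in dB.
    pylab.plot(freqs[:nfft//2], 20*numpy.log10(numpy.abs(H[:nfft//2]) + 1e-12))
    pylab.xlabel("Frequency (Hz)")
    pylab.ylabel("Magnitude (dB)")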
gpl-3.0
arnavd96/Cinemiezer
myvenv/lib/python3.4/site-packages/numpy/lib/function_base.py
15
150516
from __future__ import division, absolute_import, print_function import warnings import sys import collections import operator import numpy as np import numpy.core.numeric as _nx from numpy.core import linspace, atleast_1d, atleast_2d from numpy.core.numeric import ( ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, empty_like, ndarray, around, floor, ceil, take, dot, where, intp, integer, isscalar ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, mod, exp, log10 ) from numpy.core.fromnumeric import ( ravel, nonzero, sort, partition, mean, any, sum ) from numpy.core.numerictypes import typecodes, number from numpy.lib.twodim_base import diag from .utils import deprecate from numpy.core.multiarray import _insert, add_docstring from numpy.core.multiarray import digitize, bincount, interp as compiled_interp from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc from numpy.compat import long from numpy.compat.py3k import basestring # Force range to be a generator, for np.delete's usage. if sys.version_info[0] < 3: range = xrange __all__ = [ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' ] def iterable(y): """ Check whether or not an object can be iterated over. Parameters ---------- y : object Input object. Returns ------- b : {0, 1} Return 1 if the object has an iterator method or is a sequence, and 0 otherwise. Examples -------- >>> np.iterable([1, 2, 3]) 1 >>> np.iterable(2) 0 """ try: iter(y) except: return 0 return 1 def _hist_bin_sqrt(x): """ Square root histogram bin estimator. Bin width is inversely proportional to the data size. Used by many programs for its simplicity. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / np.sqrt(x.size) def _hist_bin_sturges(x): """ Sturges histogram bin estimator. A very simplistic estimator based on the assumption of normality of the data. This estimator has poor performance for non-normal data, which becomes especially obvious for large data sets. The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / (np.log2(x.size) + 1.0) def _hist_bin_rice(x): """ Rice histogram bin estimator. Another simple estimator with no normality assumption. It has better performance for large data than Sturges, but tends to overestimate the number of bins. The number of bins is proportional to the cube root of data size (asymptotically optimal). The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / (2.0 * x.size ** (1.0 / 3)) def _hist_bin_scott(x): """ Scott histogram bin estimator. 
The binwidth is proportional to the standard deviation of the data and inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) def _hist_bin_doane(x): """ Doane's histogram bin estimator. Improved version of Sturges' formula which works better for non-normal data. See http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ if x.size > 2: sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) sigma = np.std(x) if sigma > 0.0: # These three operations add up to # g1 = np.mean(((x - np.mean(x)) / sigma)**3) # but use only one temp array instead of three temp = x - np.mean(x) np.true_divide(temp, sigma, temp) np.power(temp, 3, temp) g1 = np.mean(temp) return x.ptp() / (1.0 + np.log2(x.size) + np.log2(1.0 + np.absolute(g1) / sg1)) return 0.0 def _hist_bin_fd(x): """ The Freedman-Diaconis histogram bin estimator. The Freedman-Diaconis rule uses interquartile range (IQR) to estimate binwidth. It is considered a variation of the Scott rule with more robustness as the IQR is less affected by outliers than the standard deviation. However, the IQR depends on fewer points than the standard deviation, so it is less accurate, especially for long tailed distributions. If the IQR is 0, this function returns 1 for the number of bins. Binwidth is inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ iqr = np.subtract(*np.percentile(x, [75, 25])) return 2.0 * iqr * x.size ** (-1.0 / 3.0) def _hist_bin_auto(x): """ Histogram bin estimator that uses the minimum width of the Freedman-Diaconis and Sturges estimators. The FD estimator is usually the most robust method, but its width estimate tends to be too large for small `x`. The Sturges estimator is quite good for small (<1000) datasets and is the default in the R language. This method gives good off the shelf behaviour. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. See Also -------- _hist_bin_fd, _hist_bin_sturges """ # There is no need to check for zero here. If ptp is, so is IQR and # vice versa. Either both are zero or neither one is. return min(_hist_bin_fd(x), _hist_bin_sturges(x)) # Private dict initialized at module load time _hist_bin_selectors = {'auto': _hist_bin_auto, 'doane': _hist_bin_doane, 'fd': _hist_bin_fd, 'rice': _hist_bin_rice, 'scott': _hist_bin_scott, 'sqrt': _hist_bin_sqrt, 'sturges': _hist_bin_sturges} def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): r""" Compute the histogram of a set of data. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). 
        If `bins` is a sequence, it defines the bin edges, including the
        rightmost edge, allowing for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string from the list below, `histogram` will use
        the method chosen to calculate the optimal bin width and
        consequently the number of bins (see `Notes` for more detail on
        the estimators) from the data that falls within the requested
        range. While the bin width will be optimal for the actual data
        in the range, the number of bins will be computed to fill the
        entire range, including the empty portions. For visualisation,
        using the 'auto' option is suggested. Weighted data is not
        supported for automated bin size selection.

        'auto'
            Maximum of the 'sturges' and 'fd' estimators. Provides good
            all round performance.

        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.

        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.

        'scott'
            Less robust estimator that takes into account data
            variability and data size.

        'rice'
            Estimator does not take variability into account, only data
            size. Commonly overestimates number of bins required.

        'sturges'
            R's default method, only accounts for data size. Only
            optimal for gaussian data and underestimates number of bins
            for large non-gaussian datasets.

        'sqrt'
            Square root (of data size) estimator, used by Excel and
            other programs for its speed and simplicity.

    range : (float, float), optional
        The lower and upper range of the bins.  If not provided, range
        is simply ``(a.min(), a.max())``.  Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    normed : bool, optional
        This keyword is deprecated in Numpy 1.6 due to confusing/buggy
        behavior. It will be removed in Numpy 2.0. Use the ``density``
        keyword instead. If ``False``, the result will contain the
        number of samples in each bin. If ``True``, the result is the
        value of the probability *density* function at the bin,
        normalized such that the *integral* over the range is 1. Note
        that this latter behavior is known to be buggy with unequal bin
        widths; use ``density`` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`.  Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
        Overrides the ``normed`` keyword if given.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted, digitize

    Notes
    -----
    All but the last (righthand-most) bin is half-open.
    In other words, if `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``.  The last bin, however, is ``[3, 4]``, which
    *includes* 4.

    .. versionadded:: 1.11.0

    The methods to estimate the optimal number of bins are well founded
    in literature, and are inspired by the choices R provides for
    histogram visualisation. Note that having the number of bins
    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
    why it appears in most estimators. These are simply plug-in methods
    that give good starting points for number of bins. In the equations
    below, :math:`h` is the binwidth and :math:`n_h` is the number of
    bins. All estimators that compute bin counts are recast to bin width
    using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``.

    'Auto' (maximum of the 'Sturges' and 'FD' estimators)
        A compromise to get a good value. For small datasets the Sturges
        value will usually be chosen, while larger datasets will usually
        default to FD.  Avoids the overly conservative behaviour of FD
        and Sturges for small and large datasets respectively.
        Switchover point is usually :math:`a.size \approx 1000`.

    'FD' (Freedman Diaconis Estimator)
        .. math:: h = 2 \frac{IQR}{n^{1/3}}

        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good for large
        datasets. The IQR is very robust to outliers.

    'Scott'
        .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}

        The binwidth is proportional to the standard deviation of the
        data and inversely proportional to cube root of ``x.size``. Can
        be too conservative for small datasets, but is quite good for
        large datasets. The standard deviation is not very robust to
        outliers. Values are very similar to the Freedman-Diaconis
        estimator in the absence of outliers.

    'Rice'
        .. math:: n_h = 2n^{1/3}

        The number of bins is only proportional to cube root of
        ``a.size``. It tends to overestimate the number of bins and it
        does not take into account data variability.

    'Sturges'
        .. math:: n_h = \log _{2}n+1

        The number of bins is the base 2 log of ``a.size``.  This
        estimator assumes normality of data and is too conservative for
        larger, non-normal datasets. This is the default method in R's
        ``hist`` method.

    'Doane'
        .. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})

            g_1 = mean[(\frac{x - \mu}{\sigma})^3]

            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}

        An improved version of Sturges' formula that produces better
        estimates for non-normal datasets. This estimator attempts to
        account for the skew of the data.

    'Sqrt'
        .. math:: n_h = \sqrt n

        The simplest and fastest estimator. Only takes into account the
        data size.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0

    ..
versionadded:: 1.11.0 Automated Bin Selection Methods example, using 2 peak random data with 2000 points: >>> import matplotlib.pyplot as plt >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size=1000), ... rng.normal(loc=5, scale=2, size=1000))) >>> plt.hist(a, bins='auto') # plt.hist passes it's arguments to np.histogram >>> plt.title("Histogram with 'auto' bins") >>> plt.show() """ a = asarray(a) if weights is not None: weights = asarray(weights) if np.any(weights.shape != a.shape): raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() # Do not modify the original value of range so we can check for `None` if range is None: if a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. mn, mx = 0.0, 1.0 else: mn, mx = a.min() + 0.0, a.max() + 0.0 else: mn, mx = [mi + 0.0 for mi in range] if mn > mx: raise ValueError( 'max must be larger than min in range parameter.') if not np.all(np.isfinite([mn, mx])): raise ValueError( 'range parameter must be finite.') if mn == mx: mn -= 0.5 mx += 0.5 if isinstance(bins, basestring): # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated if bins not in _hist_bin_selectors: raise ValueError("{0} not a valid estimator for bins".format(bins)) if weights is not None: raise TypeError("Automated estimation of the number of " "bins is not supported for weighted data") # Make a reference to `a` b = a # Update the reference if the range needs truncation if range is not None: keep = (a >= mn) keep &= (a <= mx) if not np.logical_and.reduce(keep): b = a[keep] if b.size == 0: bins = 1 else: # Do not call selectors on empty arrays width = _hist_bin_selectors[bins](b) if width: bins = int(np.ceil((mx - mn) / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. bins = 1 # Histogram is an integer or a float array depending on the weights. if weights is None: ntype = np.dtype(np.intp) else: ntype = weights.dtype # We set a block size, as this allows us to iterate over chunks when # computing histograms, to minimize memory usage. BLOCK = 65536 if not iterable(bins): if np.isscalar(bins) and bins < 1: raise ValueError( '`bins` should be a positive integer.') # At this point, if the weights are not integer, floating point, or # complex, we have to use the slow algorithm. if weights is not None and not (np.can_cast(weights.dtype, np.double) or np.can_cast(weights.dtype, np.complex)): bins = linspace(mn, mx, bins + 1, endpoint=True) if not iterable(bins): # We now convert values of a to bin indices, under the assumption of # equal bin widths (which is valid here). # Initialize empty histogram n = np.zeros(bins, ntype) # Pre-compute histogram scaling factor norm = bins / (mx - mn) # Compute the bin edges for potential correction. bin_edges = linspace(mn, mx, bins + 1, endpoint=True) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it # is 2x as fast) and it results in a memory footprint 3x lower in the # limit of large arrays. 
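        # Within each block below, values are first mapped to bin indices by
        # the linear transform (value - mn) * norm and then accumulated with
        # np.bincount.  As an illustrative example, with a = [0.5, 1.5, 2.5],
        # bins = 3 and range = (0, 3), norm is 1.0, the indices become
        # [0, 1, 2] and bincount contributes [1, 1, 1] to `n`.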
for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] if weights is None: tmp_w = None else: tmp_w = weights[i:i + BLOCK] # Only include values in the right range keep = (tmp_a >= mn) keep &= (tmp_a <= mx) if not np.logical_and.reduce(keep): tmp_a = tmp_a[keep] if tmp_w is not None: tmp_w = tmp_w[keep] tmp_a_data = tmp_a.astype(float) tmp_a = tmp_a_data - mn tmp_a *= norm # Compute the bin indices, and for values that lie exactly on mx we # need to subtract one indices = tmp_a.astype(np.intp) indices[indices == bins] -= 1 # The index computation is not guaranteed to give exactly # consistent results within ~1 ULP of the bin edges. decrement = tmp_a_data < bin_edges[indices] indices[decrement] -= 1 # The last bin includes the right edge. The other bins do not. increment = (tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1) indices[increment] += 1 # We now compute the histogram using bincount if ntype.kind == 'c': n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins) n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins) else: n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype) # Rename the bin edges for return. bins = bin_edges else: bins = asarray(bins) if (np.diff(bins) < 0).any(): raise ValueError( 'bins must increase monotonically.') # Initialize empty histogram n = np.zeros(bins.shape, ntype) if weights is None: for i in arange(0, len(a), BLOCK): sa = sort(a[i:i+BLOCK]) n += np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] tmp_w = weights[i:i+BLOCK] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] cw = np.concatenate(([zero, ], sw.cumsum())) bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] n += cw[bin_index] n = np.diff(n) if density is not None: if density: db = array(np.diff(bins), float) return n/db/n.sum(), bins else: return n, bins else: # deprecated, buggy behavior. Remove for Numpy 2.0 if normed: db = array(np.diff(bins), float) return n/(n*db).sum(), bins else: return n, bins def histogramdd(sample, bins=10, range=None, normed=False, weights=None): """ Compute the multidimensional histogram of some data. Parameters ---------- sample : array_like The data to be histogrammed. It must be an (N,D) array or data that can be converted to such. The rows of the resulting array are the coordinates of points in a D dimensional polytope. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitly in `bins`. Defaults to the minimum and maximum values along each dimension. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density ``bin_count / sample_count / bin_volume``. weights : (N,) array_like, optional An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. Weights are normalized to 1 if normed is True. If normed is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray The multidimensional histogram of sample x. See normed and weights for the different possible semantics. 
edges : list A list of D arrays describing the bin edges for each dimension. See Also -------- histogram: 1-D histogram histogram2d: 2-D histogram Examples -------- >>> r = np.random.randn(100,3) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) """ try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = atleast_2d(sample).T N, D = sample.shape nbin = empty(D, int) edges = D*[None] dedges = D*[None] if weights is not None: weights = asarray(weights) try: M = len(bins) if M != D: raise ValueError( 'The dimension of bins must be equal to the dimension of the ' ' sample x.') except TypeError: # bins is an integer bins = D*[bins] # Select range for each dimension # Used only if number of bins is given. if range is None: # Handle empty input. Range can't be determined in that case, use 0-1. if N == 0: smin = zeros(D) smax = ones(D) else: smin = atleast_1d(array(sample.min(0), float)) smax = atleast_1d(array(sample.max(0), float)) else: if not np.all(np.isfinite(range)): raise ValueError( 'range parameter must be finite.') smin = zeros(D) smax = zeros(D) for i in arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # avoid rounding issues for comparisons when dealing with inexact types if np.issubdtype(sample.dtype, np.inexact): edge_dt = sample.dtype else: edge_dt = float # Create edge arrays for i in arange(D): if isscalar(bins[i]): if bins[i] < 1: raise ValueError( "Element at index %s in `bins` should be a positive " "integer." % i) nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) else: edges[i] = asarray(bins[i], edge_dt) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = diff(edges[i]) if np.any(np.asarray(dedges[i]) <= 0): raise ValueError( "Found bin edge of size <= 0. Did you specify `bins` with" "non-monotonic sequence?") nbin = asarray(nbin) # Handle empty input. if N == 0: return np.zeros(nbin-2), edges # Compute the bin number each sample falls into. Ncount = {} for i in arange(D): Ncount[i] = digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right edge to be # counted in the last bin, and not as an outlier. for i in arange(D): # Rounding precision mindiff = dedges[i].min() if not np.isinf(mindiff): decimal = int(-log10(mindiff)) + 6 # Find which points are on the rightmost edge. not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays # will raise an error. hist = zeros(nbin, float).reshape(-1) # Compute the sample indices in the flattened histogram matrix. ni = nbin.argsort() xy = zeros(N, int) for i in arange(0, D-1): xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() xy += Ncount[ni[-1]] # Compute the number of repetitions in xy and assign it to the # flattened histmat. 
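    # `xy` holds, for every sample, the linear (flattened) index of its bin in
    # the permuted histogram grid, so the bincount below yields the count (or
    # sum of weights) per flattened bin.  As an illustrative example, with two
    # dimensions and nbin = [4, 4] (two real bins plus two outlier slots per
    # axis), a sample in bin (2, 3) gets xy = 2 * 4 + 3 = 11.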
if len(xy) == 0: return zeros(nbin-2, int), edges flatcount = bincount(xy, weights) a = arange(len(flatcount)) hist[a] = flatcount # Shape into a proper matrix hist = hist.reshape(sort(nbin)) for i in arange(nbin.size): j = ni.argsort()[i] hist = hist.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). core = D*[slice(1, -1)] hist = hist[core] # Normalize if normed is True if normed: s = hist.sum() for i in arange(D): shape = ones(D, int) shape[i] = nbin[i] - 2 hist = hist / dedges[i].reshape(shape) hist /= s if (hist.shape != nbin - 2).any(): raise RuntimeError( "Internal Shape Error") return hist, edges def average(a, axis=None, weights=None, returned=False): """ Compute the weighted average along the specified axis. Parameters ---------- a : array_like Array containing data to be averaged. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which to average `a`. If `None`, averaging is done over the flattened array. weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the average according to its associated weight. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a weight equal to one. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. Returns ------- average, [sum_of_weights] : array_type or double Return the average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum of the weights as the second element. The return type is `Float` if `a` is of integer type, otherwise it is of the same type as `a`. `sum_of_weights` is of the same type as `average`. Raises ------ ZeroDivisionError When all weights along axis are zero. See `numpy.ma.average` for a version robust to this type of error. TypeError When the length of 1D `weights` is not the same as the shape of `a` along axis. See Also -------- mean ma.average : average for masked arrays -- useful if your data contains "missing" values Examples -------- >>> data = range(1,5) >>> data [1, 2, 3, 4] >>> np.average(data) 2.5 >>> np.average(range(1,11), weights=range(10,0,-1)) 4.0 >>> data = np.arange(6).reshape((3,2)) >>> data array([[0, 1], [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) array([ 0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) Traceback (most recent call last): ... TypeError: Axis must be specified when shapes of a and weights differ. 
""" if not isinstance(a, np.matrix): a = np.asarray(a) if weights is None: avg = a.mean(axis) scl = avg.dtype.type(a.size/avg.size) else: a = a + 0.0 wgt = np.asarray(weights) # Sanity checks if a.shape != wgt.shape: if axis is None: raise TypeError( "Axis must be specified when shapes of a and weights " "differ.") if wgt.ndim != 1: raise TypeError( "1D weights expected when shapes of a and weights differ.") if wgt.shape[0] != a.shape[axis]: raise ValueError( "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype)) if (scl == 0.0).any(): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") avg = np.multiply(a, wgt).sum(axis)/scl if returned: scl = np.multiply(avg, 0) + scl return avg, scl else: return avg def asarray_chkfinite(a, dtype=None, order=None): """Convert the input to an array, checking for NaNs or Infs. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. Success requires no NaNs or Infs. dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major (C-style) or column-major (Fortran-style) memory representation. Defaults to 'C'. Returns ------- out : ndarray Array interpretation of `a`. No copy is performed if the input is already an ndarray. If `a` is a subclass of ndarray, a base class ndarray is returned. Raises ------ ValueError Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). See Also -------- asarray : Create and array. asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. asfarray : Convert input to a floating point ndarray. asfortranarray : Convert input to an ndarray with column-major memory order. fromiter : Create an array from an iterator. fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array. If all elements are finite ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] >>> np.asarray_chkfinite(a, dtype=float) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. >>> a = [1, 2, np.inf] >>> try: ... np.asarray_chkfinite(a) ... except ValueError: ... print('ValueError') ... ValueError """ a = asarray(a, dtype=dtype, order=order) if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): raise ValueError( "array must not contain infs or NaNs") return a def piecewise(x, condlist, funclist, *args, **kw): """ Evaluate a piecewise-defined function. Given a set of conditions and corresponding functions, evaluate each function on the input data wherever its condition is true. Parameters ---------- x : ndarray The input domain. condlist : list of bool arrays Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as the output value. Each boolean array in `condlist` selects a piece of `x`, and should therefore be of the same shape as `x`. The length of `condlist` must correspond to that of `funclist`. If one extra function is given, i.e. if ``len(funclist) - len(condlist) == 1``, then that extra function is the default value, used wherever all conditions are false. 
funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding condition is True. It should take an array as input and give an array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional Any further arguments given to `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is called as ``f(x, 1, 'a')``. kw : dict, optional Keyword arguments used in calling `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., lambda=1)``, then each function is called as ``f(x, lambda=1)``. Returns ------- out : ndarray The output is the same shape and type as x and is found by calling the functions in `funclist` on the appropriate portions of `x`, as defined by the boolean arrays in `condlist`. Portions not covered by any condition have a default value of 0. See Also -------- choose, select, where Notes ----- This is similar to choose or select, except that functions are evaluated on elements of `x` that satisfy the corresponding condition from `condlist`. The result is:: |-- |funclist[0](x[condlist[0]]) out = |funclist[1](x[condlist[1]]) |... |funclist[n2](x[condlist[n2]]) |-- Examples -------- Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) array([-1., -1., -1., 1., 1., 1.]) Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) """ x = asanyarray(x) n2 = len(funclist) if (isscalar(condlist) or not (isinstance(condlist[0], list) or isinstance(condlist[0], ndarray))): condlist = [condlist] condlist = array(condlist, dtype=bool) n = len(condlist) # This is a hack to work around problems with NumPy's # handling of 0-d arrays and boolean indexing with # numpy.bool_ scalars zerod = False if x.ndim == 0: x = x[None] zerod = True if condlist.shape[-1] != 1: condlist = condlist.T if n == n2 - 1: # compute the "otherwise" condition. totlist = np.logical_or.reduce(condlist, axis=0) # Only able to stack vertically if the array is 1d or less if x.ndim <= 1: condlist = np.vstack([condlist, ~totlist]) else: condlist = [asarray(c, dtype=bool) for c in condlist] totlist = condlist[0] for k in range(1, n): totlist |= condlist[k] condlist.append(~totlist) n += 1 y = zeros(x.shape, x.dtype) for k in range(n): item = funclist[k] if not isinstance(item, collections.Callable): y[condlist[k]] = item else: vals = x[condlist[k]] if vals.size > 0: y[condlist[k]] = item(vals, *args, **kw) if zerod: y = y.squeeze() return y def select(condlist, choicelist, default=0): """ Return an array drawn from elements in choicelist, depending on conditions. Parameters ---------- condlist : list of bool ndarrays The list of conditions which determine from which array in `choicelist` the output elements are taken. When multiple conditions are satisfied, the first one encountered in `condlist` is used. choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. default : scalar, optional The element inserted in `output` when all conditions evaluate to False. 
Returns ------- output : ndarray The output at position m is the m-th element of the array in `choicelist` where the m-th element of the corresponding array in `condlist` is True. See Also -------- where : Return elements from one of two arrays depending on condition. take, choose, compress, diag, diagonal Examples -------- >>> x = np.arange(10) >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. if len(condlist) != len(choicelist): raise ValueError( 'list of cases must be same length as list of conditions') # Now that the dtype is known, handle the deprecated select([], []) case if len(condlist) == 0: # 2014-02-24, 1.9 warnings.warn("select with an empty condition list is not possible" "and will be deprecated", DeprecationWarning) return np.asarray(default)[()] choicelist = [np.asarray(choice) for choice in choicelist] choicelist.append(np.asarray(default)) # need to get the result type before broadcasting for correct scalar # behaviour dtype = np.result_type(*choicelist) # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. Doing it separately optimizes # for example when all choices are scalars. condlist = np.broadcast_arrays(*condlist) choicelist = np.broadcast_arrays(*choicelist) # If cond array is not an ndarray in boolean format or scalar bool, abort. deprecated_ints = False for i in range(len(condlist)): cond = condlist[i] if cond.dtype.type is not np.bool_: if np.issubdtype(cond.dtype, np.integer): # A previous implementation accepted int ndarrays accidentally. # Supported here deliberately, but deprecated. condlist[i] = condlist[i].astype(bool) deprecated_ints = True else: raise ValueError( 'invalid entry in choicelist: should be boolean ndarray') if deprecated_ints: # 2014-02-24, 1.9 msg = "select condlists containing integer ndarrays is deprecated " \ "and will be removed in the future. Use `.astype(bool)` to " \ "convert to bools." warnings.warn(msg, DeprecationWarning) if choicelist[0].ndim == 0: # This may be common, so avoid the call. result_shape = condlist[0].shape else: result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape result = np.full(result_shape, choicelist[-1], dtype) # Use np.copyto to burn each choicelist array onto result, using the # corresponding condlist as a boolean mask. This is done in reverse # order since the first choice should take precedence. choicelist = choicelist[-2::-1] condlist = condlist[::-1] for choice, cond in zip(choicelist, condlist): np.copyto(result, choice, where=cond) return result def copy(a, order='K'): """ Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:ndarray.copy are very similar, but have different default values for their order= arguments.) Returns ------- arr : ndarray Array interpretation of `a`. 
    Notes
    -----
    This is equivalent to

    >>> np.array(a, copy=True)  #doctest: +SKIP

    Examples
    --------
    Create an array x, with a reference y and a copy z:

    >>> x = np.array([1, 2, 3])
    >>> y = x
    >>> z = np.copy(x)

    Note that, when we modify x, y changes, but not z:

    >>> x[0] = 10
    >>> x[0] == y[0]
    True
    >>> x[0] == z[0]
    False

    """
    return array(a, order=order, copy=True)

# Basic operations


def gradient(f, *varargs, **kwargs):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using second order accurate central
    differences in the interior and either first differences or second
    order accurate one-sided (forward or backwards) differences at the
    boundaries. The returned gradient hence has the same shape as the
    input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    varargs : scalar or list of scalar, optional
        N scalars specifying the sample distances for each dimension,
        i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
        A single scalar specifies the sample distance for all dimensions.
        If `axis` is given, the number of varargs must equal the number
        of axes.
    edge_order : {1, 2}, optional
        Gradient is calculated using N\ :sup:`th` order accurate
        differences at the boundaries. Default: 1.

        .. versionadded:: 1.9.1

    axis : None or int or tuple of ints, optional
        Gradient is calculated only along the given axis or axes.
        The default (axis = None) is to calculate the gradient for all
        the axes of the input array. axis may be negative, in which
        case it counts from the last to the first axis.

        .. versionadded:: 1.11.0

    Returns
    -------
    gradient : list of ndarray
        Each element of `list` has the same shape as `f` giving the
        derivative of `f` with respect to each dimension.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
    >>> np.gradient(x)
    array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])

    For two dimensional arrays, the return will be two arrays ordered
    by axis. In this example the first array stands for the gradient in
    rows and the second one in columns direction:

    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
    [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1.
]])] >>> x = np.array([0, 1, 2, 3, 4]) >>> dx = np.gradient(x) >>> y = x**2 >>> np.gradient(y, dx, edge_order=2) array([-0., 2., 4., 6., 8.]) The axis keyword can be used to specify a subset of axes of which the gradient is calculated >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) """ f = np.asanyarray(f) N = len(f.shape) # number of dimensions axes = kwargs.pop('axis', None) if axes is None: axes = tuple(range(N)) # check axes to have correct type and no duplicate entries if isinstance(axes, int): axes = (axes,) if not isinstance(axes, tuple): raise TypeError("A tuple of integers or a single integer is required") # normalize axis values: axes = tuple(x + N if x < 0 else x for x in axes) if max(axes) >= N or min(axes) < 0: raise ValueError("'axis' entry is out of bounds") if len(set(axes)) != len(axes): raise ValueError("duplicate value in 'axis'") n = len(varargs) if n == 0: dx = [1.0]*N elif n == 1: dx = [varargs[0]]*N elif n == len(axes): dx = list(varargs) else: raise SyntaxError( "invalid number of arguments") edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( '", "'.join(kwargs.keys()))) if edge_order > 2: raise ValueError("'edge_order' greater than 2 not supported") # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. outvals = [] # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)]*N slice2 = [slice(None)]*N slice3 = [slice(None)]*N slice4 = [slice(None)]*N otype = f.dtype.char if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: otype = 'd' # Difference of datetime64 elements results in timedelta64 if otype == 'M': # Need to use the full dtype name because it contains unit information otype = f.dtype.name.replace('datetime', 'timedelta') elif otype == 'm': # Needs to keep the specific units, can't be a general unit otype = f.dtype # Convert datetime64 data into ints. Make dummy variable `y` # that is a view of ints if the data is datetime64, otherwise # just set y equal to the array `f`. 
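    # Illustrative sketch of the per-axis loop below: for y = [1, 2, 4, 7]
    # with unit spacing and edge_order=1, the interior uses central
    # differences, (4 - 1)/2 = 1.5 and (7 - 2)/2 = 2.5, while the edges use
    # one-sided differences, 2 - 1 = 1 and 7 - 4 = 3, giving
    # [1., 1.5, 2.5, 3.].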
if f.dtype.char in ["M", "m"]: y = f.view('int64') else: y = f for i, axis in enumerate(axes): if y.shape[axis] < 2: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least two elements are required.") # Numerical differentiation: 1st order edges, 2nd order interior if y.shape[axis] == 2 or edge_order == 1: # Use first order differences for time data out = np.empty_like(y, dtype=otype) slice1[axis] = slice(1, -1) slice2[axis] = slice(2, None) slice3[axis] = slice(None, -2) # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 out[slice1] = (y[slice2] - y[slice3])/2.0 slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 # 1D equivalent -- out[0] = (y[1] - y[0]) out[slice1] = (y[slice2] - y[slice3]) slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 # 1D equivalent -- out[-1] = (y[-1] - y[-2]) out[slice1] = (y[slice2] - y[slice3]) # Numerical differentiation: 2st order edges, 2nd order interior else: # Use second order differences where possible out = np.empty_like(y, dtype=otype) slice1[axis] = slice(1, -1) slice2[axis] = slice(2, None) slice3[axis] = slice(None, -2) # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 out[slice1] = (y[slice2] - y[slice3])/2.0 slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0 out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 slice4[axis] = -3 # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 # divide by step size out /= dx[i] outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) slice4[axis] = slice(None) if len(axes) == 1: return outvals[0] else: return outvals def diff(a, n=1, axis=-1): """ Calculate the n-th discrete difference along given axis. The first difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher differences are calculated by using `diff` recursively. Parameters ---------- a : array_like Input array n : int, optional The number of times values are differenced. axis : int, optional The axis along which the difference is taken, default is the last axis. Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. . See Also -------- gradient, ediff1d, cumsum Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) a = asanyarray(a) nd = len(a.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) if n > 1: return diff(a[slice1]-a[slice2], n-1, axis=axis) else: return a[slice1]-a[slice2] def interp(x, xp, fp, left=None, right=None, period=None): """ One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points. Parameters ---------- x : array_like The x-coordinates of the interpolated values. 
xp : 1-D sequence of floats The x-coordinates of the data points, must be increasing if argument `period` is not specified. Otherwise, `xp` is internally sorted after normalizing the periodic boundaries with ``xp = xp % period``. fp : 1-D sequence of floats The y-coordinates of the data points, same length as `xp`. left : float, optional Value to return for `x < xp[0]`, default is `fp[0]`. right : float, optional Value to return for `x > xp[-1]`, default is `fp[-1]`. period : None or float, optional A period for the x-coordinates. This parameter allows the proper interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. .. versionadded:: 1.10.0 Returns ------- y : float or ndarray The interpolated values, same shape as `x`. Raises ------ ValueError If `xp` and `fp` have different length If `xp` or `fp` are not 1-D sequences If `period == 0` Notes ----- Does not check that the x-coordinate sequence `xp` is increasing. If `xp` is not increasing, the results are nonsense. A simple check for increasing is:: np.all(np.diff(xp) > 0) Examples -------- >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) array([ 3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 Plot an interpolant to the sine function: >>> x = np.linspace(0, 2*np.pi, 10) >>> y = np.sin(x) >>> xvals = np.linspace(0, 2*np.pi, 50) >>> yinterp = np.interp(xvals, x, y) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(xvals, yinterp, '-x') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.show() Interpolation with periodic x-coordinates: >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) """ if period is None: if isinstance(x, (float, int, number)): return compiled_interp([x], xp, fp, left, right).item() elif isinstance(x, np.ndarray) and x.ndim == 0: return compiled_interp([x], xp, fp, left, right).item() else: return compiled_interp(x, xp, fp, left, right) else: if period == 0: raise ValueError("period must be a non-zero value") period = abs(period) left = None right = None return_array = True if isinstance(x, (float, int, number)): return_array = False x = [x] x = np.asarray(x, dtype=np.float64) xp = np.asarray(xp, dtype=np.float64) fp = np.asarray(fp, dtype=np.float64) if xp.ndim != 1 or fp.ndim != 1: raise ValueError("Data points must be 1-D sequences") if xp.shape[0] != fp.shape[0]: raise ValueError("fp and xp are not of the same length") # normalizing periodic boundaries x = x % period xp = xp % period asort_xp = np.argsort(xp) xp = xp[asort_xp] fp = fp[asort_xp] xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) fp = np.concatenate((fp[-1:], fp, fp[0:1])) if return_array: return compiled_interp(x, xp, fp, left, right) else: return compiled_interp(x, xp, fp, left, right).item() def angle(z, deg=0): """ Return the angle of the complex argument. Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. See Also -------- arctan2 absolute Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. 
, 1.57079633, 0.78539816]) >>> np.angle(1+1j, deg=True) # in degrees 45.0 """ if deg: fact = 180/pi else: fact = 1.0 z = asarray(z) if (issubclass(z.dtype.type, _nx.complexfloating)): zimag = z.imag zreal = z.real else: zimag = 0 zreal = z return arctan2(zimag, zreal) * fact def unwrap(p, discont=pi, axis=-1): """ Unwrap by changing deltas between values to 2*pi complement. Unwrap radian phase `p` by changing absolute jumps greater than `discont` to their 2*pi complement along the given axis. Parameters ---------- p : array_like Input array. discont : float, optional Maximum discontinuity between values, default is ``pi``. axis : int, optional Axis along which unwrap will operate, default is the last axis. Returns ------- out : ndarray Output array. See Also -------- rad2deg, deg2rad Notes ----- If the discontinuity in `p` is smaller than ``pi``, but larger than `discont`, no unwrapping is done because taking the 2*pi complement would only make the discontinuity larger. Examples -------- >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) """ p = asarray(p) nd = len(p.shape) dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) ddmod = mod(dd + pi, 2*pi) - pi _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up def sort_complex(a): """ Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': return b.astype('F') elif b.dtype.char == 'g': return b.astype('G') else: return b.astype('D') else: return b def trim_zeros(filt, trim='fb'): """ Trim the leading and/or trailing zeros from a 1-D array or sequence. Parameters ---------- filt : 1-D array or sequence Input array. trim : str, optional A string with 'f' representing trim from front and 'b' to trim from back. Default is 'fb', trim zeros from both front and back of the array. Returns ------- trimmed : 1-D array or sequence The result of trimming the input. The input data type is preserved. Examples -------- >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') array([0, 0, 0, 1, 2, 3, 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. >>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ first = 0 trim = trim.upper() if 'F' in trim: for i in filt: if i != 0.: break else: first = first + 1 last = len(filt) if 'B' in trim: for i in filt[::-1]: if i != 0.: break else: last = last - 1 return filt[first:last] @deprecate def unique(x): """ This function is deprecated. Use numpy.lib.arraysetops.unique() instead. 
""" try: tmp = x.flatten() if tmp.size == 0: return tmp tmp.sort() idx = concatenate(([True], tmp[1:] != tmp[:-1])) return tmp[idx] except AttributeError: items = sorted(set(x)) return asarray(items) def extract(condition, arr): """ Return the elements of an array that satisfy some condition. This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. Note that `place` does the exact opposite of `extract`. Parameters ---------- condition : array_like An array whose nonzero or True entries indicate the elements of `arr` to extract. arr : array_like Input array of the same size as `condition`. Returns ------- extract : ndarray Rank 1 array of values from `arr` where `condition` is True. See Also -------- take, put, copyto, compress, place Examples -------- >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> condition = np.mod(arr, 3)==0 >>> condition array([[ True, False, False, True], [False, False, True, False], [False, True, False, False]], dtype=bool) >>> np.extract(condition, arr) array([0, 3, 6, 9]) If `condition` is boolean: >>> arr[condition] array([0, 3, 6, 9]) """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) def place(arr, mask, vals): """ Change elements of an array based on conditional and input values. Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that `place` uses the first N elements of `vals`, where N is the number of True values in `mask`, while `copyto` uses the elements where `mask` is True. Note that `extract` does the exact opposite of `place`. Parameters ---------- arr : ndarray Array to put data into. mask : array_like Boolean mask array. Must have the same size as `a`. vals : 1-D sequence Values to put into `a`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller than N it will be repeated. See Also -------- copyto, put, take, extract Examples -------- >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]]) """ if not isinstance(arr, np.ndarray): raise TypeError("argument 1 must be numpy.ndarray, " "not {name}".format(name=type(arr).__name__)) return _insert(arr, mask, vals) def disp(mesg, device=None, linefeed=True): """ Display a message on a device. Parameters ---------- mesg : str Message to display. device : object Device to write message. If None, defaults to ``sys.stdout`` which is very similar to ``print``. `device` needs to have ``write()`` and ``flush()`` methods. linefeed : bool, optional Option whether to print a line feed or not. Defaults to True. Raises ------ AttributeError If `device` does not have a ``write()`` or ``flush()`` method. Examples -------- Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: >>> from StringIO import StringIO >>> buf = StringIO() >>> np.disp('"Display" in a file', device=buf) >>> buf.getvalue() '"Display" in a file\\n' """ if device is None: device = sys.stdout if linefeed: device.write('%s\n' % mesg) else: device.write('%s' % mesg) device.flush() return class vectorize(object): """ vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False) Generalized function class. Define a vectorized function which takes a nested sequence of objects or numpy arrays as inputs and returns a numpy array as output. 
The vectorized function evaluates `pyfunc` over successive tuples of the input arrays like the python map function, except it uses the broadcasting rules of numpy. The data type of the output of `vectorized` is determined by calling the function with the first element of the input. This can be avoided by specifying the `otypes` argument. Parameters ---------- pyfunc : callable A python function or method. otypes : str or list of dtypes, optional The output data type. It must be specified as either a string of typecode characters or a list of data type specifiers. There should be one data type specifier for each output. doc : str, optional The docstring for the function. If `None`, the docstring will be the ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. .. versionadded:: 1.7.0 cache : bool, optional If `True`, then cache the first function call that determines the number of outputs if `otypes` is not provided. .. versionadded:: 1.7.0 Returns ------- vectorized : callable Vectorized function. Examples -------- >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: ... return a - b ... else: ... return a + b >>> vfunc = np.vectorize(myfunc) >>> vfunc([1, 2, 3, 4], 2) array([3, 4, 1, 2]) The docstring is taken from the input function to `vectorize` unless it is specified >>> vfunc.__doc__ 'Return a-b if a>b, otherwise return a+b' >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') >>> vfunc.__doc__ 'Vectorized `myfunc`' The output type is determined by evaluating the first element of the input, unless it is specified >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.int32'> >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length such as the coefficients for a polynomial as in `polyval`: >>> def mypolyval(p, x): ... _p = list(p) ... res = _p.pop(0) ... while _p: ... res = res*x + _p.pop(0) ... return res >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) >>> vpolyval(p=[1, 2, 3], x=[0, 1]) array([3, 6]) Positional arguments may also be excluded by specifying their position: >>> vpolyval.excluded.add(0) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) Notes ----- The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. If `otypes` is not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the original function must be wrapped which will slow down subsequent calls, so only do this if your function is expensive. The new keyword argument interface and `excluded` argument support further degrades performance. 
""" def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False): self.pyfunc = pyfunc self.cache = cache self._ufunc = None # Caching to improve default performance if doc is None: self.__doc__ = pyfunc.__doc__ else: self.__doc__ = doc if isinstance(otypes, str): self.otypes = otypes for char in self.otypes: if char not in typecodes['All']: raise ValueError( "Invalid otype specified: %s" % (char,)) elif iterable(otypes): self.otypes = ''.join([_nx.dtype(x).char for x in otypes]) else: raise ValueError( "Invalid otype specification") # Excluded variable support if excluded is None: excluded = set() self.excluded = set(excluded) def __call__(self, *args, **kwargs): """ Return arrays with the results of `pyfunc` broadcast (vectorized) over `args` and `kwargs` not in `excluded`. """ excluded = self.excluded if not kwargs and not excluded: func = self.pyfunc vargs = args else: # The wrapper accepts only positional arguments: we use `names` and # `inds` to mutate `the_args` and `kwargs` to pass to the original # function. nargs = len(args) names = [_n for _n in kwargs if _n not in excluded] inds = [_i for _i in range(nargs) if _i not in excluded] the_args = list(args) def func(*vargs): for _n, _i in enumerate(inds): the_args[_i] = vargs[_n] kwargs.update(zip(names, vargs[len(inds):])) return self.pyfunc(*the_args, **kwargs) vargs = [args[_i] for _i in inds] vargs.extend([kwargs[_n] for _n in names]) return self._vectorize_call(func=func, args=vargs) def _get_ufunc_and_otypes(self, func, args): """Return (ufunc, otypes).""" # frompyfunc will fail if args is empty if not args: raise ValueError('args can not be empty') if self.otypes: otypes = self.otypes nout = len(otypes) # Note logic here: We only *use* self._ufunc if func is self.pyfunc # even though we set self._ufunc regardless. if func is self.pyfunc and self._ufunc is not None: ufunc = self._ufunc else: ufunc = self._ufunc = frompyfunc(func, len(args), nout) else: # Get number of outputs and output types by calling the function on # the first entries of args. We also cache the result to prevent # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) inputs = [asarray(_a).flat[0] for _a in args] outputs = func(*inputs) # Performance note: profiling indicates that -- for simple # functions at least -- this wrapping can almost double the # execution time. # Hence we make it optional. if self.cache: _cache = [outputs] def _func(*vargs): if _cache: return _cache.pop() else: return func(*vargs) else: _func = func if isinstance(outputs, tuple): nout = len(outputs) else: nout = 1 outputs = (outputs,) otypes = ''.join([asarray(outputs[_k]).dtype.char for _k in range(nout)]) # Performance note: profiling indicates that creating the ufunc is # not a significant cost compared with wrapping so it seems not # worth trying to cache this. 
ufunc = frompyfunc(_func, len(args), nout) return ufunc, otypes def _vectorize_call(self, func, args): """Vectorized call to `func` over positional `args`.""" if not args: _res = func() else: ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) # Convert args to object arrays first inputs = [array(_a, copy=False, subok=True, dtype=object) for _a in args] outputs = ufunc(*inputs) if ufunc.nout == 1: _res = array(outputs, copy=False, subok=True, dtype=otypes[0]) else: _res = tuple([array(_x, copy=False, subok=True, dtype=_t) for _x, _t in zip(outputs, otypes)]) return _res def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None): """ Estimate a covariance matrix, given data and weights. Covariance indicates the level to which two variables vary together. If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. See the notes for an outline of the algorithm. Parameters ---------- m : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same form as that of `m`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : bool, optional Default normalization (False) is by ``(N - 1)``, where ``N`` is the number of observations given (unbiased estimate). If `bias` is True, then normalization is by ``N``. These values can be overridden by using the keyword ``ddof`` in numpy versions >= 1.5. ddof : int, optional If not ``None`` the default value implied by `bias` is overridden. Note that ``ddof=1`` will return the unbiased estimate, even if both `fweights` and `aweights` are specified, and ``ddof=0`` will return the simple average. See the notes for the details. The default value is ``None``. .. versionadded:: 1.5 fweights : array_like, int, optional 1-D array of integer freguency weights; the number of times each observation vector should be repeated. .. versionadded:: 1.10 aweights : array_like, optional 1-D array of observation vector weights. These relative weights are typically large for observations considered "important" and smaller for observations considered less "important". If ``ddof=0`` the array of weights can be used to assign probabilities to observation vectors. .. versionadded:: 1.10 Returns ------- out : ndarray The covariance matrix of the variables. See Also -------- corrcoef : Normalized covariance matrix Notes ----- Assume that the observations are in the columns of the observation array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The steps to compute the weighted covariance are as follows:: >>> w = f * a >>> v1 = np.sum(w) >>> v2 = np.sum(w * a) >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1 >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) Note that when ``a == 1``, the normalization factor ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` as it should. 
Examples -------- Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T >>> x array([[0, 1, 2], [2, 1, 0]]) Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance matrix shows this clearly: >>> np.cov(x) array([[ 1., -1.], [-1., 1.]]) Note that element :math:`C_{0,1}`, which shows the correlation between :math:`x_0` and :math:`x_1`, is negative. Further, note how `x` and `y` are combined: >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.vstack((x,y)) >>> print(np.cov(X)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x, y)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x)) 11.71 """ # Check inputs if ddof is not None and ddof != int(ddof): raise ValueError( "ddof must be integer") # Handles complex arrays too m = np.asarray(m) if y is None: dtype = np.result_type(m, np.float64) else: y = np.asarray(y) dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) if rowvar == 0 and X.shape[0] != 1: X = X.T if X.shape[0] == 0: return np.array([]).reshape(0, 0) if y is not None: y = array(y, copy=False, ndmin=2, dtype=dtype) if rowvar == 0 and y.shape[0] != 1: y = y.T X = np.vstack((X, y)) if ddof is None: if bias == 0: ddof = 1 else: ddof = 0 # Get the product of frequencies and weights w = None if fweights is not None: fweights = np.asarray(fweights, dtype=np.float) if not np.all(fweights == np.around(fweights)): raise TypeError( "fweights must be integer") if fweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional fweights") if fweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and fweights") if any(fweights < 0): raise ValueError( "fweights cannot be negative") w = fweights if aweights is not None: aweights = np.asarray(aweights, dtype=np.float) if aweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional aweights") if aweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and aweights") if any(aweights < 0): raise ValueError( "aweights cannot be negative") if w is None: w = aweights else: w *= aweights avg, w_sum = average(X, axis=1, weights=w, returned=True) w_sum = w_sum[0] # Determine the normalization if w is None: fact = X.shape[1] - ddof elif ddof == 0: fact = w_sum elif aweights is None: fact = w_sum - ddof else: fact = w_sum - ddof*sum(w*aweights)/w_sum if fact <= 0: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) fact = 0.0 X -= avg[:, None] if w is None: X_T = X.T else: X_T = (X*w).T c = dot(X, X_T.conj()) c *= 1. / np.float64(fact) return c.squeeze() def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue): """ Return Pearson product-moment correlation coefficients. Please refer to the documentation for `cov` for more detail. The relationship between the correlation coefficient matrix, `R`, and the covariance matrix, `C`, is .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } The values of `R` are between -1 and 1, inclusive. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same shape as `x`. rowvar : int, optional If `rowvar` is non-zero (default), then each row represents a variable, with observations in the columns. 
Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 ddof : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 Returns ------- R : ndarray The correlation coefficient matrix of the variables. See Also -------- cov : Covariance matrix Notes ----- Due to floating point rounding the resulting array may not be Hermitian, the diagonal elements may not be 1, and the elements may not satisfy the inequality abs(a) <= 1. The real and imaginary parts are clipped to the interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. This function accepts but discards arguments `bias` and `ddof`. This is for backwards compatibility with previous versions of this function. These arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn('bias and ddof have no effect and are deprecated', DeprecationWarning) c = cov(x, y, rowvar) try: d = diag(c) except ValueError: # scalar covariance # nan if incorrect value (nan, inf, 0), 1 otherwise return c / c stddev = sqrt(d.real) c /= stddev[:, None] c /= stddev[None, :] # Clip real and imaginary parts to [-1, 1]. This does not guarantee # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without # excessive work. np.clip(c.real, -1, 1, out=c.real) if np.iscomplexobj(c): np.clip(c.imag, -1, 1, out=c.imag) return c def blackman(M): """ Return the Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, hamming, hanning, kaiser Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the kaiser window. References ---------- Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
Examples -------- >>> np.blackman(12) array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.blackman(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): """ Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, with the maximum value normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The fourier transform of the Bartlett is the product of two sinc functions. Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. 
]) Plot the window and its frequency response (requires SciPy and matplotlib): >>> from numpy.fft import fft, fftshift >>> window = np.bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) def hanning(M): """ Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- bartlett, blackman, hamming, kaiser Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hanning was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 0.07937323, 0. 
]) Plot the window and its frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) def hamming(M): """ Return the Hamming window. The Hamming window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hanning, kaiser Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. 
Examples -------- >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) ## Code from cephes for i0 _i0A = [ -4.41534164647933937950E-18, 3.33079451882223809783E-17, -2.43127984654795469359E-16, 1.71539128555513303061E-15, -1.16853328779934516808E-14, 7.67618549860493561688E-14, -4.85644678311192946090E-13, 2.95505266312963983461E-12, -1.72682629144155570723E-11, 9.67580903537323691224E-11, -5.18979560163526290666E-10, 2.65982372468238665035E-9, -1.30002500998624804212E-8, 6.04699502254191894932E-8, -2.67079385394061173391E-7, 1.11738753912010371815E-6, -4.41673835845875056359E-6, 1.64484480707288970893E-5, -5.75419501008210370398E-5, 1.88502885095841655729E-4, -5.76375574538582365885E-4, 1.63947561694133579842E-3, -4.32430999505057594430E-3, 1.05464603945949983183E-2, -2.37374148058994688156E-2, 4.93052842396707084878E-2, -9.49010970480476444210E-2, 1.71620901522208775349E-1, -3.04682672343198398683E-1, 6.76795274409476084995E-1 ] _i0B = [ -7.23318048787475395456E-18, -4.83050448594418207126E-18, 4.46562142029675999901E-17, 3.46122286769746109310E-17, -2.82762398051658348494E-16, -3.42548561967721913462E-16, 1.77256013305652638360E-15, 3.81168066935262242075E-15, -9.55484669882830764870E-15, -4.15056934728722208663E-14, 1.54008621752140982691E-14, 3.85277838274214270114E-13, 7.18012445138366623367E-13, -1.79417853150680611778E-12, -1.32158118404477131188E-11, -3.14991652796324136454E-11, 1.18891471078464383424E-11, 4.94060238822496958910E-10, 3.39623202570838634515E-9, 2.26666899049817806459E-8, 2.04891858946906374183E-7, 2.89137052083475648297E-6, 6.88975834691682398426E-5, 3.36911647825569408990E-3, 8.04490411014108831608E-1 ] def _chbevl(x, vals): b0 = vals[0] b1 = 0.0 for i in range(1, len(vals)): b2 = b1 b1 = b0 b0 = x*b1 - b2 + vals[i] return 0.5*(b0 - b2) def _i0_1(x): return exp(x) * _chbevl(x/2.0-2, _i0A) def _i0_2(x): return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) def i0(x): """ Modified Bessel function of the first kind, order 0. Usually denoted :math:`I_0`. This function does broadcast, but will *not* "up-cast" int dtype arguments unless accompanied by at least one float or complex dtype argument (see Raises below). Parameters ---------- x : array_like, dtype float or complex Argument of the Bessel function. 
Returns ------- out : ndarray, shape = x.shape, dtype = x.dtype The modified Bessel function evaluated at each of the elements of `x`. Raises ------ TypeError: array cannot be safely cast to required type If argument consists exclusively of int dtypes. See Also -------- scipy.special.iv, scipy.special.ive Notes ----- We use the algorithm published by Clenshaw [1]_ and referenced by Abramowitz and Stegun [2]_, for which the function domain is partitioned into the two intervals [0,8] and (8,inf), and Chebyshev polynomial expansions are employed in each interval. Relative error on the domain [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). References ---------- .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in *National Physical Laboratory Mathematical Tables*, vol. 5, London: Her Majesty's Stationery Office, 1962. .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 379. http://www.math.sfu.ca/~cbm/aands/page_379.htm .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html Examples -------- >>> np.i0([0.]) array(1.0) >>> np.i0([0., 1. + 2j]) array([ 1.00000000+0.j , 0.18785373+0.64616944j]) """ x = atleast_1d(x).copy() y = empty_like(x) ind = (x < 0) x[ind] = -x[ind] ind = (x <= 8.0) y[ind] = _i0_1(x[ind]) ind2 = ~ind y[ind2] = _i0_2(x[ind2]) return y.squeeze() ## End of cephes code for i0 def kaiser(M, beta): """ Return the Kaiser window. The Kaiser window is a taper formed by using a Bessel function. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. beta : float Shape parameter for window. Returns ------- out : array The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hamming, hanning Notes ----- The Kaiser window is defined as .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} \\right)/I_0(\\beta) with .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, where :math:`I_0` is the modified zeroth-order Bessel function. The Kaiser was named for Jim Kaiser, who discovered a simple approximation to the DPSS window based on Bessel functions. The Kaiser window is a very good approximation to the Digital Prolate Spheroidal Sequence, or Slepian window, which is the transform which maximizes the energy in the main lobe of the window relative to total energy. The Kaiser can approximate many other windows by varying the beta parameter. ==== ======================= beta Window shape ==== ======================= 0 Rectangular 5 Similar to a Hamming 6 Similar to a Hanning 8.6 Similar to a Blackman ==== ======================= A beta value of 14 is probably a good starting point. Note that as beta gets large, the window narrows, and so the number of samples needs to be large enough to sample the increasingly narrow spike, otherwise NaNs will get returned. Most references to the Kaiser window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 
John Wiley and Sons, New York, (1966). .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 177-178. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function Examples -------- >>> np.kaiser(12, 14) array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.kaiser(51, 14) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ from numpy.dual import i0 if M == 1: return np.array([1.]) n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) def sinc(x): """ Return the sinc function. The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. Parameters ---------- x : ndarray Array (possibly multi-dimensional) of values for which to to calculate ``sinc(x)``. Returns ------- out : ndarray ``sinc(x)``, which has the same shape as the input. Notes ----- ``sinc(0)`` is the limit value 1. The name sinc is short for "sine cardinal" or "sinus cardinalis". The sinc function is used in various signal processing applications, including in anti-aliasing, in the construction of a Lanczos resampling filter, and in interpolation. For bandlimited interpolation of discrete-time signals, the ideal interpolation kernel is proportional to the sinc function. References ---------- .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/SincFunction.html .. 
[2] Wikipedia, "Sinc function", http://en.wikipedia.org/wiki/Sinc_function Examples -------- >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("X") <matplotlib.text.Text object at 0x...> >>> plt.show() It works in 2-D as well: >>> x = np.linspace(-4, 4, 401) >>> xx = np.outer(x, x) >>> plt.imshow(np.sinc(xx)) <matplotlib.image.AxesImage object at 0x...> """ x = np.asanyarray(x) y = pi * where(x == 0, 1.0e-20, x) return sin(y)/y def msort(a): """ Return a copy of an array sorted along the first axis. Parameters ---------- a : array_like Array to be sorted. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- sort Notes ----- ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. """ b = array(a, subok=True, copy=True) b.sort(0) return b def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input array or object that can be converted to an array. func : callable Reduction function Kapable of receiving an axis argument. It is is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ a = np.asanyarray(a) axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim try: axis = operator.index(axis) if axis >= nd or axis < -nd: raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim)) keepdim[axis] = 1 except TypeError: sax = set() for x in axis: if x >= nd or x < -nd: raise IndexError("axis %d out of bounds (%d)" % (x, nd)) if x in sax: raise ValueError("duplicate value in axis") sax.add(x % nd) keepdim[x] = 1 keep = sax.symmetric_difference(frozenset(range(nd))) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 else: keepdim = [1] * a.ndim r = func(a, **kwargs) return r, keepdim def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): """ Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. 
axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `median`. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. If `overwrite_input` is ``True`` and `a` is not already an `ndarray`, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.9.0 Returns ------- median : ndarray A new array holding the result. If the input contains integers or floats smaller than ``float64``, then the output data-type is ``np.float64``. Otherwise, the data-type of the output is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, percentile Notes ----- Given a vector ``V`` of length ``N``, the median of ``V`` is the middle value of a sorted copy of ``V``, ``V_sorted`` - i e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the two middle values of ``V_sorted`` when ``N`` is even. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.median(a) 3.5 >>> np.median(a, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.median(a, axis=1) array([ 7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) 3.5 >>> assert not np.all(a==b) """ r, k = _ureduce(a, func=_median, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims: return r.reshape(k) else: return r def _median(a, axis=None, out=None, overwrite_input=False): # can't be reasonably be implemented in terms of percentile as we have to # call mean to not break astropy a = np.asanyarray(a) # Set the partition indexes if axis is None: sz = a.size else: sz = a.shape[axis] if sz % 2 == 0: szh = sz // 2 kth = [szh - 1, szh] else: kth = [(sz - 1) // 2] # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): kth.append(-1) if overwrite_input: if axis is None: part = a.ravel() part.partition(kth) else: a.partition(kth, axis=axis) part = a else: part = partition(a, kth, axis=axis) if part.shape == (): # make 0-D arrays work return part.item() if axis is None: axis = 0 indexer = [slice(None)] * part.ndim index = part.shape[axis] // 2 if part.shape[axis] % 2 == 1: # index with slice to allow mean (below) to work indexer[axis] = slice(index, index+1) else: indexer[axis] = slice(index-1, index+1) # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact) and sz > 0: # warn and return nans like mean would rout = mean(part[indexer], axis=axis, out=out) part = np.rollaxis(part, axis, part.ndim) n = 
np.isnan(part[..., -1]) if rout.ndim == 0: if n == True: warnings.warn("Invalid value encountered in median", RuntimeWarning) if out is not None: out[...] = a.dtype.type(np.nan) rout = out else: rout = a.dtype.type(np.nan) elif np.count_nonzero(n.ravel()) > 0: warnings.warn("Invalid value encountered in median for" + " %d results" % np.count_nonzero(n.ravel()), RuntimeWarning) rout[n] = np.nan return rout else: # if there are no nans # Use mean in odd and even case to coerce data type # and check, use out array. return mean(part[indexer], axis=axis, out=out) def percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): """ Compute the qth percentile of the data along the specified axis. Returns the qth percentile(s) of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute, which must be between 0 and 100 inclusive. axis : {int, sequence of int, None}, optional Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `percentile`. This will save memory when you do not need to preserve the contents of the input array. In this case you should not make any assumptions about the contents of the input `a` after this function completes -- treat it as undefined. Default is False. If `a` is not already an array, this parameter will have no effect as `a` will be converted to an array internally regardless of the value of this parameter. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. .. versionadded:: 1.9.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. .. versionadded:: 1.9.0 Returns ------- percentile : scalar or ndarray If `q` is a single percentile and `axis=None`, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the percentiles. The other axes are the axes that remain after the reduction of `a`. If the input contains integers or floats smaller than ``float64``, the output data-type is ``float64``. Otherwise, the output data-type is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, median, nanpercentile Notes ----- Given a vector ``V`` of length ``N``, the ``q``-th percentile of ``V`` is the value ``q/100`` of the way from the minimum to the maximum in a sorted copy of ``V``.
The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of ``q`` exactly. This function is the same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, 50) 3.5 >>> np.percentile(a, 50, axis=0) array([[ 6.5, 4.5, 2.5]]) >>> np.percentile(a, 50, axis=1) array([ 7., 2.]) >>> np.percentile(a, 50, axis=1, keepdims=True) array([[ 7.], [ 2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=out) array([[ 6.5, 4.5, 2.5]]) >>> m array([[ 6.5, 4.5, 2.5]]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a == b) """ q = array(q, dtype=np.float64, copy=True) r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: if q.ndim == 0: return r.reshape(k) else: return r.reshape([len(q)] + k) else: return r def _percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): a = asarray(a) if q.ndim == 0: # Do not allow 0-d arrays because following code fails for scalar zerod = True q = q[None] else: zerod = False # avoid expensive reductions, relevant for arrays with < O(1000) elements if q.size < 10: for i in range(q.size): if q[i] < 0. or q[i] > 100.: raise ValueError("Percentiles must be in the range [0,100]") q[i] /= 100. else: # faster than any() if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): raise ValueError("Percentiles must be in the range [0,100]") q /= 100. 
# prepare a for partioning if overwrite_input: if axis is None: ap = a.ravel() else: ap = a else: if axis is None: ap = a.flatten() else: ap = a.copy() if axis is None: axis = 0 Nx = ap.shape[axis] indices = q * (Nx - 1) # round fractional indices according to interpolation method if interpolation == 'lower': indices = floor(indices).astype(intp) elif interpolation == 'higher': indices = ceil(indices).astype(intp) elif interpolation == 'midpoint': indices = 0.5 * (floor(indices) + ceil(indices)) elif interpolation == 'nearest': indices = around(indices).astype(intp) elif interpolation == 'linear': pass # keep index as fraction and interpolate else: raise ValueError( "interpolation can only be 'linear', 'lower' 'higher', " "'midpoint', or 'nearest'") n = np.array(False, dtype=bool) # check for nan's flag if indices.dtype == intp: # take the points along axis # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = concatenate((indices, [-1])) ap.partition(indices, axis=axis) # ensure axis with qth is first ap = np.rollaxis(ap, axis, 0) axis = 0 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = indices[:-1] n = np.isnan(ap[-1:, ...]) if zerod: indices = indices[0] r = take(ap, indices, axis=axis, out=out) else: # weight the points above and below the indices indices_below = floor(indices).astype(intp) indices_above = indices_below + 1 indices_above[indices_above > Nx - 1] = Nx - 1 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = concatenate((indices_above, [-1])) weights_above = indices - indices_below weights_below = 1.0 - weights_above weights_shape = [1, ] * ap.ndim weights_shape[axis] = len(indices) weights_below.shape = weights_shape weights_above.shape = weights_shape ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with qth is first ap = np.rollaxis(ap, axis, 0) weights_below = np.rollaxis(weights_below, axis, 0) weights_above = np.rollaxis(weights_above, axis, 0) axis = 0 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = indices_above[:-1] n = np.isnan(ap[-1:, ...]) x1 = take(ap, indices_below, axis=axis) * weights_below x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first x1 = np.rollaxis(x1, axis, 0) x2 = np.rollaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) x2 = x2.squeeze(0) if out is not None: r = add(x1, x2, out=out) else: r = add(x1, x2) if np.any(n): warnings.warn("Invalid value encountered in percentile", RuntimeWarning) if zerod: if ap.ndim == 1: if out is not None: out[...] = a.dtype.type(np.nan) r = out else: r = a.dtype.type(np.nan) else: r[..., n.squeeze(0)] = a.dtype.type(np.nan) else: if r.ndim == 1: r[:] = a.dtype.type(np.nan) else: r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) return r def trapz(y, x=None, dx=1.0, axis=-1): """ Integrate along the given axis using the composite trapezoidal rule. Integrate `y` (`x`) along given axis. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional The sample points corresponding to the `y` values. If `x` is None, the sample points are assumed to be evenly spaced `dx` apart. The default is None. dx : scalar, optional The spacing between sample points when `x` is None. The default is 1. axis : int, optional The axis along which to integrate. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule. 
See Also -------- sum, cumsum Notes ----- Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will be taken from `y` array, by default x-axis distances between points will be 1.0, alternatively they can be provided with `x` array or with `dx` scalar. Return value will be equal to combined area under the red lines. References ---------- .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png Examples -------- >>> np.trapz([1,2,3]) 4.0 >>> np.trapz([1,2,3], x=[4,6,8]) 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 >>> a = np.arange(6).reshape(2, 3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) array([ 1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) array([ 2., 8.]) """ y = asanyarray(y) if x is None: d = dx else: x = asanyarray(x) if x.ndim == 1: d = diff(x) # reshape to correct shape shape = [1]*y.ndim shape[axis] = d.shape[0] d = d.reshape(shape) else: d = diff(x, axis=axis) nd = len(y.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) try: ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) except ValueError: # Operations didn't work, cast to ndarray d = np.asarray(d) y = np.asarray(y) ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) return ret #always succeed def add_newdoc(place, obj, doc): """ Adds documentation to obj which is in module place. If doc is a string add it to obj as a docstring If doc is a tuple, then the first element is interpreted as an attribute of obj and the second as the docstring (method, docstring) If doc is a list, then each element of the list should be a sequence of length two --> [(method1, docstring1), (method2, docstring2), ...] This routine never raises an error. This routine cannot modify read-only docstrings, as appear in new-style classes or built-in functions. Because this routine never raises an error the caller must check manually that the docstrings were changed. """ try: new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): add_docstring(new, doc.strip()) elif isinstance(doc, tuple): add_docstring(getattr(new, doc[0]), doc[1].strip()) elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) except: pass # Based on scitools meshgrid def meshgrid(*xi, **kwargs): """ Return coordinate matrices from coordinate vectors. Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. .. versionchanged:: 1.9 1-D and 0-D cases are allowed. Parameters ---------- x1, x2,..., xn : array_like 1-D arrays representing the coordinates of a grid. indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. .. versionadded:: 1.7.0 sparse : bool, optional If True a sparse grid is returned in order to conserve memory. Default is False. .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. Please note that ``sparse=False, copy=False`` will likely return non-contiguous arrays. Furthermore, more than one element of a broadcast array may refer to a single memory location. If you need to write to the arrays, make copies first. .. 
versionadded:: 1.7.0 Returns ------- X1, X2,..., XN : ndarray For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' with the elements of `xi` repeated to fill the matrix along the first dimension for `x1`, the second for `x2` and so on. Notes ----- This function supports both indexing conventions through the indexing keyword argument. Giving the string 'ij' returns a meshgrid with matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case with inputs of length M and N, the outputs are of shape (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is illustrated by the following code snippet:: xv, yv = meshgrid(x, y, sparse=False, indexing='ij') for i in range(nx): for j in range(ny): # treat xv[i,j], yv[i,j] xv, yv = meshgrid(x, y, sparse=False, indexing='xy') for i in range(nx): for j in range(ny): # treat xv[j,i], yv[j,i] In the 1-D and 0-D case, the indexing and sparse keywords have no effect. See Also -------- index_tricks.mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" using indexing notation. Examples -------- >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) >>> xv, yv = meshgrid(x, y) >>> xv array([[ 0. , 0.5, 1. ], [ 0. , 0.5, 1. ]]) >>> yv array([[ 0., 0., 0.], [ 1., 1., 1.]]) >>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv array([[ 0. , 0.5, 1. ]]) >>> yv array([[ 0.], [ 1.]]) `meshgrid` is very useful to evaluate functions on a grid. >>> x = np.arange(-5, 5, 0.1) >>> y = np.arange(-5, 5, 0.1) >>> xx, yy = meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) >>> h = plt.contourf(x,y,z) """ ndim = len(xi) copy_ = kwargs.pop('copy', True) sparse = kwargs.pop('sparse', False) indexing = kwargs.pop('indexing', 'xy') if kwargs: raise TypeError("meshgrid() got an unexpected keyword argument '%s'" % (list(kwargs)[0],)) if indexing not in ['xy', 'ij']: raise ValueError( "Valid values for `indexing` are 'xy' and 'ij'.") s0 = (1,) * ndim output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(xi)] shape = [x.size for x in output] if indexing == 'xy' and ndim > 1: # switch first and second axis output[0].shape = (1, -1) + (1,)*(ndim - 2) output[1].shape = (-1, 1) + (1,)*(ndim - 2) shape[0], shape[1] = shape[1], shape[0] if sparse: if copy_: return [x.copy() for x in output] else: return output else: # Return the full N-D matrix (not only the 1-D vector) if copy_: mult_fact = np.ones(shape, dtype=int) return [x * mult_fact for x in output] else: return np.broadcast_arrays(*output) def delete(arr, obj, axis=None): """ Return a new array with sub-arrays along an axis deleted. For a one dimensional array, this returns those entries not returned by `arr[obj]`. Parameters ---------- arr : array_like Input array. obj : slice, int or array of ints Indicate which sub-arrays to remove. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. Returns ------- out : ndarray A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. 
If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. append : Append elements at the end of an array. Notes ----- Often it is preferable to use a boolean mask. For example: >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further use of `mask`. Examples -------- >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], [ 5, 6, 7, 8], [ 9, 10, 11, 12]]) >>> np.delete(arr, 1, 0) array([[ 1, 2, 3, 4], [ 9, 10, 11, 12]]) >>> np.delete(arr, np.s_[::2], 1) array([[ 2, 4], [ 6, 8], [10, 12]]) >>> np.delete(arr, [1,3,5], None) array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 if ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from delete and raise an error", DeprecationWarning) if wrap: return wrap(arr) else: return arr.copy() slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): start, stop, step = obj.indices(N) xr = range(start, stop, step) numtodel = len(xr) if numtodel <= 0: if wrap: return wrap(arr.copy()) else: return arr.copy() # Invert if step is negative: if step < 0: step = -step start = xr[-1] stop = xr[0] + 1 newshape[axis] -= numtodel new = empty(newshape, arr.dtype, arrorder) # copy initial chunk if start == 0: pass else: slobj[axis] = slice(None, start) new[slobj] = arr[slobj] # copy end chunck if stop == N: pass else: slobj[axis] = slice(stop-numtodel, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(stop, None) new[slobj] = arr[slobj2] # copy middle pieces if step == 1: pass else: # use array indexing. keep = ones(stop-start, dtype=bool) keep[:stop-start:step] = False slobj[axis] = slice(start, stop-numtodel) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(start, stop) arr = arr[slobj2] slobj2[axis] = keep new[slobj] = arr[slobj2] if wrap: return wrap(new) else: return new _obj = obj obj = np.asarray(obj) # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: warnings.warn( "in the future insert will treat boolean arrays and array-likes " "as boolean index instead of casting it to integer", FutureWarning) obj = obj.astype(intp) if isinstance(_obj, (int, long, integer)): # optimization for a single value obj = obj.item() if (obj < -N or obj >= N): raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (obj < 0): obj += N newshape[axis] -= 1 new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] slobj[axis] = slice(obj, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(obj+1, None) new[slobj] = arr[slobj2] else: if obj.size == 0 and not isinstance(_obj, np.ndarray): obj = obj.astype(intp) if not np.can_cast(obj, intp, 'same_kind'): # obj.size = 1 special case always failed and would just # give superfluous warnings. 
# 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in delete will result in an " "error in the future", DeprecationWarning) obj = obj.astype(intp) keep = ones(N, dtype=bool) # Test if there are out of bound indices, this is deprecated inside_bounds = (obj < N) & (obj >= -N) if not inside_bounds.all(): # 2013-09-24, 1.9 warnings.warn( "in the future out of bounds indices will raise an error " "instead of being ignored by `numpy.delete`.", DeprecationWarning) obj = obj[inside_bounds] positive_indices = obj >= 0 if not positive_indices.all(): warnings.warn( "in the future negative indices will not be ignored by " "`numpy.delete`.", FutureWarning) obj = obj[positive_indices] keep[obj, ] = False slobj[axis] = keep new = arr[slobj] if wrap: return wrap(new) else: return new def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : array_like Input array. obj : int, slice or sequence of ints Object that defines the index or indices before which `values` is inserted. .. versionadded:: 1.8.0 Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple times). values : array_like Values to insert into `arr`. If the type of `values` is different from that of `arr`, `values` is converted to the type of `arr`. `values` should be shaped so that ``arr[...,obj,...] = values`` is legal. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. Returns ------- out : ndarray A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. See Also -------- append : Append elements at the end of an array. concatenate : Join a sequence of arrays along an existing axis. delete : Delete elements from an array. Notes ----- Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. Examples -------- >>> a = np.array([[1, 1], [2, 2], [3, 3]]) >>> a array([[1, 1], [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) array([1, 5, 1, 2, 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], [3, 5, 3]]) Difference between sequence and scalars: >>> np.insert(a, [1], [[1],[2],[3]], axis=1) array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), ... 
np.insert(a, [1], [[1],[2],[3]], axis=1)) True >>> b = a.flatten() >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) array([1, 1, 5, 6, 2, 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) array([1, 1, 5, 2, 6, 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting array([1, 1, 7, 0, 2, 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) >>> np.insert(x, idx, 999, axis=1) array([[ 0, 999, 1, 2, 999, 3], [ 4, 999, 5, 6, 999, 7]]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 else: if ndim > 0 and (axis < -ndim or axis >= ndim): raise IndexError( "axis %i is out of bounds for an array of " "dimension %i" % (axis, ndim)) if (axis < 0): axis += ndim if (ndim == 0): # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from insert and raise an error", DeprecationWarning) arr = arr.copy() arr[...] = values if wrap: return wrap(arr) else: return arr slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): # turn it into a range object indices = arange(*obj.indices(N), **{'dtype': intp}) else: # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: # See also delete warnings.warn( "in the future insert will treat boolean arrays and " "array-likes as a boolean index instead of casting it to " "integer", FutureWarning) indices = indices.astype(intp) # Code after warning period: #if obj.ndim != 1: # raise ValueError('boolean array argument obj to insert ' # 'must be one dimensional') #indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " "or scalar") if indices.size == 1: index = indices.item() if index < -N or index > N: raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (index < 0): index += N # There are some object array corner cases here, but we cannot avoid # that: values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) if indices.ndim == 0: # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. 
(here a[:,0:1,:]) values = np.rollaxis(values, 0, (axis % values.ndim) + 1) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, index) new[slobj] = arr[slobj] slobj[axis] = slice(index, index+numnew) new[slobj] = values slobj[axis] = slice(index+numnew, None) slobj2 = [slice(None)] * ndim slobj2[axis] = slice(index, None) new[slobj] = arr[slobj2] if wrap: return wrap(new) return new elif indices.size == 0 and not isinstance(obj, np.ndarray): # Can safely cast the empty list to intp indices = indices.astype(intp) if not np.can_cast(indices, intp, 'same_kind'): # 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in insert will result in an " "error in the future", DeprecationWarning) indices = indices.astype(intp) indices[indices < 0] += N numnew = len(indices) order = indices.argsort(kind='mergesort') # stable sort indices[order] += np.arange(numnew) newshape[axis] += numnew old_mask = ones(newshape[axis], dtype=bool) old_mask[indices] = False new = empty(newshape, arr.dtype, arrorder) slobj2 = [slice(None)]*ndim slobj[axis] = indices slobj2[axis] = old_mask new[slobj] = values new[slobj2] = arr if wrap: return wrap(new) return new def append(arr, values, axis=None): """ Append values to the end of an array. Parameters ---------- arr : array_like Values are appended to a copy of this array. values : array_like These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. delete : Delete elements from an array. Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) array([1, 2, 3, 4, 5, 6, 7, 8, 9]) When `axis` is specified, `values` must have the correct shape. >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): ... ValueError: arrays must have same number of dimensions """ arr = asanyarray(arr) if axis is None: if arr.ndim != 1: arr = arr.ravel() values = ravel(values) axis = arr.ndim-1 return concatenate((arr, values), axis=axis)
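# ---------------------------------------------------------------------------
# Editor's note: the sketch below is NOT part of the original numpy source
# reproduced above; it is an illustrative, hedged example added to this
# document. It spells out the weighted-covariance recipe described in the
# Notes section of `cov` (w = f * a, v1 = sum(w), v2 = sum(w * a), subtract
# the weighted mean, then normalize by v1 / (v1**2 - ddof * v2)) and checks
# the result against np.cov itself. The variable names (obs, fw, aw) are
# hypothetical and exist only for this example.
def _weighted_cov_sketch():
    import numpy as np

    # Two variables observed four times (rows = variables, columns = samples).
    obs = np.array([[0.0, 1.0, 2.0, 3.0],
                    [2.0, 1.5, 0.5, 0.0]])
    fw = np.array([1.0, 2.0, 1.0, 1.0])   # frequency weights (integer-valued)
    aw = np.array([0.5, 1.0, 1.0, 0.5])   # reliability weights (aweights)

    # Manual computation following the documented steps, with ddof = 1.
    w = fw * aw
    v1 = w.sum()
    v2 = (w * aw).sum()
    centered = obs - (obs * w).sum(axis=1, keepdims=True) / v1
    manual = np.dot(centered * w, centered.T) * v1 / (v1 ** 2 - 1.0 * v2)

    # np.cov should reproduce the same matrix for these weights.
    library = np.cov(obs, fweights=fw, aweights=aw, ddof=1)
    assert np.allclose(manual, library)
    return manual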
mit
mrcslws/htmresearch
projects/combined_sequences/object_convergence.py
3
27757
# Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2016, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ This file plots the behavior of L4-L2-TM network as you train it on objects. """ import random import time import os from math import ceil import numpy import cPickle from multiprocessing import Pool, cpu_count import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['pdf.fonttype'] = 42 from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Arial']}) from htmresearch.frameworks.layers.combined_sequence_experiment import ( L4TMExperiment ) from htmresearch.frameworks.layers.object_machine_factory import ( createObjectMachine ) def locateConvergencePoint(stats, minOverlap, maxOverlap): """ Walk backwards through stats until you locate the first point that diverges from target overlap values. We need this to handle cases where it might get to target values, diverge, and then get back again. We want the last convergence point. """ for i,v in enumerate(stats[::-1]): if not (v >= minOverlap and v <= maxOverlap): return len(stats)-i + 1 # Never differs - converged in one iteration return 1 def averageConvergencePoint(inferenceStats, prefix, minOverlap, maxOverlap, settlingTime): """ inferenceStats contains activity traces while the system visits each object. Given the i'th object, inferenceStats[i] contains activity statistics for each column for each region for the entire sequence of sensations. For each object, compute the convergence time - the first point when all L2 columns have converged. Return the average convergence time across all objects. Given inference statistics for a bunch of runs, locate all traces with the given prefix. For each trace locate the iteration where it finally settles on targetValue. Return the average settling iteration across all runs. """ convergenceSum = 0.0 numCorrect = 0.0 inferenceLength = 1000000 # For each object for stats in inferenceStats: # For each L2 column locate convergence time convergencePoint = 0.0 for key in stats.iterkeys(): if prefix in key: columnConvergence = locateConvergencePoint( stats[key], minOverlap, maxOverlap) # Ensure this column has converged by the last iteration # assert(columnConvergence <= len(stats[key])) convergencePoint = max(convergencePoint, columnConvergence) convergenceSum += ceil(float(convergencePoint)/settlingTime) if ceil(float(convergencePoint)/settlingTime) <= inferenceLength: numCorrect += 1 return convergenceSum/len(inferenceStats), numCorrect/len(inferenceStats) def averageSequenceAccuracy(inferenceStats, minOverlap, maxOverlap): """ inferenceStats contains activity traces while the system visits each object. 
Given the i'th object, inferenceStats[i] contains activity statistics for each column for each region for the entire sequence of sensations. For each object, decide whether the TM uniquely classified it by checking that the number of predictedActive cells are in an acceptable range. """ numCorrect = 0.0 numStats = 0.0 prefix = "TM PredictedActive" # For each object for stats in inferenceStats: # Keep running total of how often the number of predictedActive cells are # in the range. for key in stats.iterkeys(): if prefix in key: for numCells in stats[key]: numStats += 1.0 if numCells in range(minOverlap, maxOverlap+1): numCorrect += 1.0 return numCorrect / numStats def runExperiment(args): """ Run experiment. What did you think this does? args is a dict representing the parameters. We do it this way to support multiprocessing. args contains one or more of the following keys: @param noiseLevel (float) Noise level to add to the locations and features during inference. Default: None @param profile (bool) If True, the network will be profiled after learning and inference. Default: False @param numObjects (int) The number of objects we will train. Default: 10 @param numPoints (int) The number of points on each object. Default: 10 @param pointRange (int) Creates objects each with points ranging from [numPoints,...,numPoints+pointRange-1] A total of numObjects * pointRange objects will be created. Default: 1 @param numLocations (int) For each point, the number of locations to choose from. Default: 10 @param numFeatures (int) For each point, the number of features to choose from. Default: 10 @param numColumns (int) The total number of cortical columns in network. Default: 2 @param networkType (string)The type of network to use. Options are: "MultipleL4L2Columns", "MultipleL4L2ColumnsWithTopology" and "MultipleL4L2ColumnsWithRandomTopology". Default: "MultipleL4L2Columns" @param settlingTime (int) Number of iterations we wait to let columns stabilize. Important for multicolumn experiments with lateral connections. @param includeRandomLocation (bool) If True, a random location SDR will be generated during inference for each feature. The method returns the args dict updated with two additional keys: convergencePoint (int) The average number of iterations it took to converge across all objects objects (pairs) The list of objects we trained on """ numObjects = args.get("numObjects", 10) numLocations = args.get("numLocations", 10) numFeatures = args.get("numFeatures", 10) numColumns = args.get("numColumns", 1) networkType = args.get("networkType", "L4L2TMColumn") profile = args.get("profile", False) noiseLevel = args.get("noiseLevel", None) # TODO: implement this? 
numPoints = args.get("numPoints", 10) trialNum = args.get("trialNum", 42) pointRange = args.get("pointRange", 1) plotInferenceStats = args.get("plotInferenceStats", True) settlingTime = args.get("settlingTime", 3) includeRandomLocation = args.get("includeRandomLocation", False) inputSize = args.get("inputSize", 512) numInputBits = args.get("inputBits", 20) # Create the objects objects = createObjectMachine( machineType="simple", numInputBits=numInputBits, sensorInputSize=inputSize, externalInputSize=1024, numCorticalColumns=numColumns, numFeatures=numFeatures, seed=trialNum ) for p in range(pointRange): objects.createRandomObjects(numObjects, numPoints=numPoints+p, numLocations=numLocations, numFeatures=numFeatures) r = objects.objectConfusion() print "Average common pairs=", r[0], print ", locations=",r[1], print ", features=",r[2] # print "Total number of objects created:",len(objects.getObjects()) # print "Objects are:" # for o in objects: # pairs = objects[o] # pairs.sort() # print str(o) + ": " + str(pairs) # Setup experiment and train the network. Ensure both TM layers have identical # parameters. name = "convergence_O%03d_L%03d_F%03d_T%03d" % ( numObjects, numLocations, numFeatures, trialNum ) exp = L4TMExperiment( name=name, numCorticalColumns=numColumns, networkType = networkType, inputSize=inputSize, numInputBits=numInputBits, externalInputSize=1024, numExternalInputBits=numInputBits, seed=trialNum, L4Overrides={"initialPermanence": 0.41, "activationThreshold": 18, "minThreshold": 18, "basalPredictedSegmentDecrement": 0.0001}, ) # We want to traverse the features of each object randomly a few times before # moving on to the next time. Create the SDRs that we need for this. objectsToLearn = objects.provideObjectsToLearn() objectTraversals = {} for objectId in objectsToLearn: objectTraversals[objectId] = objects.randomTraversal( objectsToLearn[objectId], settlingTime) # Train the network on all the SDRs for all the objects exp.learnObjects(objectTraversals) if profile: exp.printProfile(reset=True) # For inference, we will check and plot convergence for each object. For each # object, we create a sequence of random sensations for each column. We will # present each sensation for settlingTime time steps to let it settle and # ensure it converges. for objectId in objects: obj = objects[objectId] objectSensations = {} for c in range(numColumns): objectSensations[c] = [] assert numColumns == 1 # Create sequence of sensations for this object for one column. The total # number of sensations is equal to the number of points on the object. No # point should be visited more than once. 
objectCopy = [pair for pair in obj] random.shuffle(objectCopy) for pair in objectCopy: objectSensations[0].append(pair) inferConfig = { "object": objectId, "numSteps": len(objectSensations[0]), "pairs": objectSensations, "includeRandomLocation": includeRandomLocation, } inferenceSDRs = objects.provideObjectToInfer(inferConfig) exp.infer(inferenceSDRs, objectName=objectId) if profile: exp.printProfile(reset=True) if plotInferenceStats: plotOneInferenceRun( exp.statistics[objectId], fields=[ ("L4 PredictedActive", "Predicted active cells in sensorimotor layer"), ("TM NextPredicted", "Predicted cells in temporal sequence layer"), ("TM PredictedActive", "Predicted active cells in temporal sequence layer"), ], basename=exp.name, experimentID=objectId, plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)), "detailed_plots") ) # Compute overall inference statistics infStats = exp.getInferenceStats() convergencePoint, sensorimotorAccuracy = averageConvergencePoint( infStats,"L2 Representation", 30, 40, settlingTime) sequenceAccuracy = averageSequenceAccuracy(infStats, 15, 25) predictedActive = numpy.zeros(len(infStats)) predicted = numpy.zeros(len(infStats)) predictedActiveL4 = numpy.zeros(len(infStats)) predictedL4 = numpy.zeros(len(infStats)) for i,stat in enumerate(infStats): predictedActive[i] = float(sum(stat["TM PredictedActive C0"][2:])) / len(stat["TM PredictedActive C0"][2:]) predicted[i] = float(sum(stat["TM NextPredicted C0"][2:])) / len(stat["TM NextPredicted C0"][2:]) predictedActiveL4[i] = float(sum(stat["L4 PredictedActive C0"])) / len(stat["L4 PredictedActive C0"]) predictedL4[i] = float(sum(stat["L4 Predicted C0"])) / len(stat["L4 Predicted C0"]) print "# objects {} # features {} # locations {} # columns {} trial # {} network type {}".format( numObjects, numFeatures, numLocations, numColumns, trialNum, networkType) print "Average convergence point=",convergencePoint, print "Accuracy:", sensorimotorAccuracy print "Sequence accuracy:", sequenceAccuracy print # Return our convergence point as well as all the parameters and objects args.update({"objects": objects.getObjects()}) args.update({"convergencePoint":convergencePoint}) args.update({"sensorimotorAccuracyPct": sensorimotorAccuracy}) args.update({"sequenceAccuracyPct": sequenceAccuracy}) args.update({"averagePredictions": predicted.mean()}) args.update({"averagePredictedActive": predictedActive.mean()}) args.update({"averagePredictionsL4": predictedL4.mean()}) args.update({"averagePredictedActiveL4": predictedActiveL4.mean()}) # Can't pickle experiment so can't return it for batch multiprocessing runs. # However this is very useful for debugging when running in a single thread. if plotInferenceStats: args.update({"experiment": exp}) return args def runExperimentPool(numObjects, numLocations, numFeatures, numWorkers=7, nTrials=1, pointRange=1, numPoints=10, includeRandomLocation=False, resultsName="convergence_results.pkl"): """ Allows you to run a number of experiments using multiple processes. For each parameter except numWorkers, pass in a list containing valid values for that parameter. The cross product of everything is run, and each combination is run nTrials times. Returns a list of dict containing detailed results from each experiment. Also pickles and saves the results in resultsName for later analysis. 
Example: results = runExperimentPool( numObjects=[10], numLocations=[5], numFeatures=[5], numColumns=[2,3,4,5,6], numWorkers=8, nTrials=5) """ # Create function arguments for every possibility args = [] for o in reversed(numObjects): for l in numLocations: for f in numFeatures: for t in range(nTrials): args.append( {"numObjects": o, "numLocations": l, "numFeatures": f, "trialNum": t, "pointRange": pointRange, "numPoints": numPoints, "plotInferenceStats": False, "includeRandomLocation": includeRandomLocation, "settlingTime": 3, } ) print "{} experiments to run, {} workers".format(len(args), numWorkers) # Run the pool if numWorkers > 1: pool = Pool(processes=numWorkers) result = pool.map(runExperiment, args) else: result = [] for arg in args: result.append(runExperiment(arg)) # Pickle results for later use with open(resultsName,"wb") as f: cPickle.dump(result,f) return result def plotConvergenceByObject(results, objectRange, featureRange, numTrials): """ Plots the convergence graph: iterations vs number of objects. Each curve shows the convergence for a given number of unique features. """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # Convergence[f,o] = how long it took it to converge with f unique features # and o objects. convergence = numpy.zeros((max(featureRange), max(objectRange) + 1)) for r in results: if r["numFeatures"] in featureRange: convergence[r["numFeatures"] - 1, r["numObjects"]] += r["convergencePoint"] convergence /= numTrials ######################################################################## # # Create the plot. x-axis= plt.figure() plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "plots", "convergence_by_object.pdf") # Plot each curve legendList = [] colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y'] for i in range(len(featureRange)): f = featureRange[i] print "features={} objectRange={} convergence={}".format( f,objectRange, convergence[f-1,objectRange]) legendList.append('Unique features={}'.format(f)) plt.plot(objectRange, convergence[f-1,objectRange], color=colorList[i]) # format plt.legend(legendList, loc="lower right", prop={'size':10}) plt.xlabel("Number of objects in training set") plt.xticks(range(0,max(objectRange)+1,10)) plt.yticks(range(0,int(convergence.max())+2)) plt.ylabel("Average number of touches") plt.title("Number of touches to recognize one object (single column)") # save plt.savefig(plotPath) plt.close() def plotPredictionsByObject(results, objectRange, featureRange, numTrials, key="", title="", yaxis=""): """ Plots the convergence graph: iterations vs number of objects. Each curve shows the convergence for a given number of unique features. """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # predictions[f,o] = how long it took it to converge with f unique features # and o objects. predictions = numpy.zeros((max(featureRange), max(objectRange) + 1)) for r in results: if r["numFeatures"] in featureRange: predictions[r["numFeatures"] - 1, r["numObjects"]] += r[key] predictions /= numTrials ######################################################################## # # Create the plot. 
x-axis= plt.figure() plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "plots", key+"_by_object.pdf") # Plot each curve legendList = [] colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y'] for i in range(len(featureRange)): f = featureRange[i] print "features={} objectRange={} convergence={}".format( f,objectRange, predictions[f-1,objectRange]) legendList.append('Unique features={}'.format(f)) plt.plot(objectRange, predictions[f-1,objectRange], color=colorList[i]) # format plt.legend(legendList, loc="center right", prop={'size':10}) plt.xlabel("Number of objects in training set") plt.xticks(range(0,max(objectRange)+1,10)) plt.yticks(range(0,int(predictions.max())+2,10)) plt.ylabel(yaxis) plt.title(title) # save plt.savefig(plotPath) plt.close() def plotSequenceAccuracy(results, featureRange, objectRange, title="", yaxis=""): """ Plot accuracy vs number of features """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # accuracy[o,f] = accuracy with o objects in training # and f unique features. accuracy = numpy.zeros((max(objectRange)+1, max(featureRange) + 1)) totals = numpy.zeros((max(objectRange)+1, max(featureRange) + 1)) for r in results: if r["numFeatures"] in featureRange and r["numObjects"] in objectRange: accuracy[r["numObjects"], r["numFeatures"]] += r["sequenceAccuracyPct"] totals[r["numObjects"], r["numFeatures"]] += 1 for o in objectRange: for f in featureRange: accuracy[o, f] = 100.0 * accuracy[o, f] / totals[o, f] ######################################################################## # # Create the plot. plt.figure() plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "plots", "sequenceAccuracy_by_object.pdf") # Plot each curve legendList = [] colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y'] for i in range(len(featureRange)): f = featureRange[i] print "features={} objectRange={} accuracy={}".format( f,objectRange, accuracy[objectRange, f]) print "Totals=",totals[objectRange, f] legendList.append('Sequence layer, feature pool size: {}'.format(f)) plt.plot(objectRange, accuracy[objectRange, f], color=colorList[i]) plt.plot(objectRange, [100] * len(objectRange), color=colorList[len(featureRange)]) legendList.append('Sensorimotor layer') # format plt.legend(legendList, bbox_to_anchor=(0., 0.6, 1., .102), loc="right", prop={'size':10}) plt.xlabel("Number of objects") # plt.xticks(range(0,max(locationRange)+1,10)) # plt.yticks(range(0,int(accuracy.max())+2,10)) plt.ylim(-10.0, 110.0) plt.ylabel(yaxis) plt.title(title) # save plt.savefig(plotPath) plt.close() def plotSequenceAccuracyBargraph(results, featureRange, objectRange, title="", yaxis=""): """ Plot accuracy vs number of features """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # accuracy[o,f] = accuracy with o objects in training # and f unique features. accuracy = numpy.zeros(max(featureRange) + 1) totals = numpy.zeros(max(featureRange) + 1) for r in results: if r["numFeatures"] in featureRange and r["numObjects"] in objectRange: accuracy[r["numFeatures"]] += r["sequenceAccuracyPct"] totals[r["numFeatures"]] += 1 for f in featureRange: accuracy[f] = 100.0 * accuracy[f] / totals[f] ######################################################################## # # Create the plot. 
plt.figure() plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "plots", "sequenceAccuracy_by_object_bar.pdf") # Plot each curve legendList = [] colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y'] ind = numpy.arange(len(featureRange)) width = 0.35 for i in range(len(featureRange)): f = featureRange[i] print "features={} objectRange={} accuracy={}".format( f,objectRange, accuracy[f]) print "Totals=",totals[f] plt.bar(i, 100.0, width, color='black') plt.bar(i+width, accuracy[f], width, color='white', edgecolor='black') legendList.append("Sensorimotor layer") legendList.append("Sequence layer") plt.legend(legendList, bbox_to_anchor=(0., 0.87, 1.0, .102), loc="right", prop={'size':10}) plt.xlabel("Number of objects") plt.xticks(ind + width / 2, featureRange) plt.ylim(0.0, 119.0) plt.ylabel(yaxis) plt.title(title) # # # save plt.savefig(plotPath) plt.close() def plotOneInferenceRun(stats, fields, basename, plotDir="plots", experimentID=0): """ Plots individual inference runs. """ if not os.path.exists(plotDir): os.makedirs(plotDir) plt.figure() objectName = stats["object"] # plot request stats for field in fields: fieldKey = field[0] + " C0" plt.plot(stats[fieldKey], marker='+', label=field[1]) # format plt.legend(loc="upper right") plt.xlabel("Input number") plt.xticks(range(stats["numSteps"])) plt.ylabel("Number of cells") # plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5) plt.ylim(-5, 50) plt.title("Activity while inferring a single object".format(objectName)) # save relPath = "{}_exp_{}.pdf".format(basename, experimentID) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close() if __name__ == "__main__": startTime = time.time() dirName = os.path.dirname(os.path.realpath(__file__)) # This is how you run a specific experiment in single process mode. Useful # for debugging, profiling, etc. if False: results = runExperiment( { "numObjects": 50, "numPoints": 10, "numLocations": 100, "numFeatures": 25, "trialNum": 4, "pointRange": 1, "plotInferenceStats": True, # Outputs detailed graphs "settlingTime": 3, } ) # Here we want to check accuracy of the TM network in classifying the # objects. if True: # We run 10 trials for each column number and then analyze results numTrials = 10 featureRange = [5, 10, 50] objectRange = [2, 5, 10, 20, 30, 40, 50, 70] locationRange = [100] resultsName = os.path.join(dirName, "sequence_accuracy_results.pkl") # Comment this out if you are re-running analysis on already saved results. # Very useful for debugging the plots # runExperimentPool( # numObjects=objectRange, # numFeatures=featureRange, # numLocations=locationRange, # numPoints=10, # nTrials=numTrials, # numWorkers=cpu_count() - 1, # resultsName=resultsName) # Analyze results with open(resultsName,"rb") as f: results = cPickle.load(f) plotSequenceAccuracy(results, featureRange, objectRange, title="Relative performance of layers during sensorimotor inference", yaxis="Accuracy (%)") # plotSequenceAccuracyBargraph(results, featureRange, objectRange, # title="Performance while inferring objects", # yaxis="Accuracy (%)") # Here we want to see how the number of objects affects convergence for a # single column. 
# This experiment is run using a process pool if False: # We run 10 trials for each column number and then analyze results numTrials = 10 featureRange = [5, 10, 100, 1000] objectRange = [2, 5, 10, 20, 30, 50] locationRange = [10, 100, 500, 1000] resultsName = os.path.join(dirName, "object_convergence_results.pkl") # Comment this out if you are re-running analysis on already saved results. # Very useful for debugging the plots runExperimentPool( numObjects=objectRange, numLocations=locationRange, numFeatures=featureRange, numPoints=10, nTrials=numTrials, numWorkers=cpu_count() - 1, resultsName=resultsName) # Analyze results with open(resultsName,"rb") as f: results = cPickle.load(f) plotConvergenceByObject(results, objectRange, featureRange, numTrials) plotPredictionsByObject(results, objectRange, featureRange, numTrials, key="averagePredictions", title="Predictions in temporal sequence layer", yaxis="Average number of predicted cells") plotPredictionsByObject(results, objectRange, featureRange, numTrials, key="averagePredictedActive", title="Correct predictions in temporal sequence layer", yaxis="Average number of correctly predicted cells" ) plotPredictionsByObject(results, objectRange, featureRange, numTrials, key="averagePredictedActiveL4", title="Correct predictions in sensorimotor layer", yaxis="Average number of correctly predicted cells" ) plotPredictionsByObject(results, objectRange, featureRange, numTrials, key="averagePredictionsL4", title="Predictions in sensorimotor layer", yaxis="Average number of predicted cells") print "Actual runtime=",time.time() - startTime
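# -----------------------------------------------------------------------------
# Illustrative note (editor's addition, not part of this experiment script):
# locateConvergencePoint walks a single overlap trace backwards and returns
# the first iteration after which the value stays inside
# [minOverlap, maxOverlap].  For example, with the L2 target range 30-40 used
# in runExperiment:
#
#     trace = [5, 12, 25, 33, 38, 36, 35]
#     locateConvergencePoint(trace, 30, 40)   # -> 4: converged from step 4 on
#
# averageConvergencePoint then divides each column's convergence point by
# settlingTime (3 here), since every sensation is presented for settlingTime
# time steps before the next one.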
agpl-3.0
avistous/QSTK
Tools/Visualizer/GenerateData.py
3
2880
''' (c) 2011, 2012 Georgia Tech Research Corporation This source code is released under the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on April, 20, 2012 @author: Sourabh Bajaj @contact: sourabhbajaj90@gmail.com @summary: Visualizer - Generating Data from QSTK ''' #import libraries import numpy as np import qstkutil.qsdateutil as du import qstkutil.tsutil as tsu import qstkutil.DataAccess as da import qstkfeat.sb_featutil as feat import datetime as dt import pickle import os import matplotlib.pyplot as plt from pylab import * from pandas import * # Changes made in the featutil file are : Edited the getFeatureFuncs() def genData(startday, endday, datadirectory, symbols): coredirectory = os.environ['QS']+'Tools/Visualizer/Data/' directorylocation= coredirectory+datadirectory+'_'+startday.date().isoformat() +'_'+endday.date().isoformat() if not os.path.exists(directorylocation): os.mkdir(directorylocation) directorylocation = directorylocation +'/' timeofday = dt.timedelta(hours=16) timestamps = du.getNYSEdays(startday,endday,timeofday) #Creating a txt file of timestamps file = open(directorylocation +'TimeStamps.txt', 'w') for onedate in timestamps: stringdate=dt.date.isoformat(onedate) file.write(stringdate+'\n') file.close() # Reading the Stock Price Data dataobj = da.DataAccess('Norgate') all_symbols = dataobj.get_all_symbols() badsymbols=set(symbols)-set(all_symbols) if len(list(badsymbols))>0: print "Some Symbols are not valid" + str(badsymbols) symbols=list(set(symbols)-badsymbols) lsKeys = ['open', 'high', 'low', 'close', 'volume'] ldfData = dataobj.get_data( timestamps, symbols, lsKeys ) dData = dict(zip(lsKeys, ldfData)) # Creating the 3D Matrix (lfcFeatures, ldArgs, lsNames)= feat.getFeatureFuncs22() FinalData = feat.applyFeatures( dData, lfcFeatures, ldArgs, sMarketRel='SPY') #Creating a txt file of symbols file = open(directorylocation +'Symbols.txt', 'w') for sym in symbols: file.write(str(sym)+'\n') file.close() #Creating a txt file of Features file = open(directorylocation +'Features.txt', 'w') for f in lsNames: file.write(f+'\n') file.close() Numpyarray=[] for IndicatorData in FinalData: Numpyarray.append(IndicatorData.values) pickle.dump(Numpyarray,open(directorylocation +'ALLDATA.pkl', 'wb' ),-1) def main(): startday=dt.datetime(2009,1,1) endday=dt.datetime(2010,12,31) datadirectory = 'Dow' # datadirectory = 'SP500' # symbols=np.loadtxt('Symbols.csv',dtype='S5',comments='#',skiprows=1,) symbols=['SPY','MMM','AA','AXP','T','BAC','BA','CAT','CVX','CSCO','KO','DD','XOM','GE','HPQ','HD','INTC','IBM','JNJ','JPM','KFT', 'MCD','MRK','MSFT','PFE','PG','TRV','UTX','VZ','WMT','DIS'] genData(startday,endday, datadirectory, symbols) if __name__ == '__main__': main()
bsd-3-clause
plissonf/scikit-learn
examples/decomposition/plot_ica_vs_pca.py
306
3329
""" ========================== FastICA on 2D point clouds ========================== This example illustrates visually in the feature space a comparison by results using two different component analysis techniques. :ref:`ICA` vs :ref:`PCA`. Representing ICA in the feature space gives the view of 'geometric ICA': ICA is an algorithm that finds directions in the feature space corresponding to projections with high non-Gaussianity. These directions need not be orthogonal in the original feature space, but they are orthogonal in the whitened feature space, in which all directions correspond to the same variance. PCA, on the other hand, finds orthogonal directions in the raw feature space that correspond to directions accounting for maximum variance. Here we simulate independent sources using a highly non-Gaussian process, 2 student T with a low number of degrees of freedom (top left figure). We mix them to create observations (top right figure). In this raw observation space, directions identified by PCA are represented by orange vectors. We represent the signal in the PCA space, after whitening by the variance corresponding to the PCA vectors (lower left). Running ICA corresponds to finding a rotation in this space to identify the directions of largest non-Gaussianity (lower right). """ print(__doc__) # Authors: Alexandre Gramfort, Gael Varoquaux # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, FastICA ############################################################################### # Generate sample data rng = np.random.RandomState(42) S = rng.standard_t(1.5, size=(20000, 2)) S[:, 0] *= 2. # Mix data A = np.array([[1, 1], [0, 2]]) # Mixing matrix X = np.dot(S, A.T) # Generate observations pca = PCA() S_pca_ = pca.fit(X).transform(X) ica = FastICA(random_state=rng) S_ica_ = ica.fit(X).transform(X) # Estimate the sources S_ica_ /= S_ica_.std(axis=0) ############################################################################### # Plot results def plot_samples(S, axis_list=None): plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10, color='steelblue', alpha=0.5) if axis_list is not None: colors = ['orange', 'red'] for color, axis in zip(colors, axis_list): axis /= axis.std() x_axis, y_axis = axis # Trick to get legend to work plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color) plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6, color=color) plt.hlines(0, -3, 3) plt.vlines(0, -3, 3) plt.xlim(-3, 3) plt.ylim(-3, 3) plt.xlabel('x') plt.ylabel('y') plt.figure() plt.subplot(2, 2, 1) plot_samples(S / S.std()) plt.title('True Independent Sources') axis_list = [pca.components_.T, ica.mixing_] plt.subplot(2, 2, 2) plot_samples(X / np.std(X), axis_list=axis_list) legend = plt.legend(['PCA', 'ICA'], loc='upper right') legend.set_zorder(100) plt.title('Observations') plt.subplot(2, 2, 3) plot_samples(S_pca_ / np.std(S_pca_, axis=0)) plt.title('PCA recovered signals') plt.subplot(2, 2, 4) plot_samples(S_ica_ / np.std(S_ica_)) plt.title('ICA recovered signals') plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36) plt.show()
bsd-3-clause
cactusbin/nyt
matplotlib/lib/matplotlib/mathtext.py
1
110722
r""" :mod:`~matplotlib.mathtext` is a module for parsing a subset of the TeX math syntax and drawing them to a matplotlib backend. For a tutorial of its usage see :ref:`mathtext-tutorial`. This document is primarily concerned with implementation details. The module uses pyparsing_ to parse the TeX expression. .. _pyparsing: http://pyparsing.wikispaces.com/ The Bakoma distribution of the TeX Computer Modern fonts, and STIX fonts are supported. There is experimental support for using arbitrary fonts, but results may vary without proper tweaking and metrics for those fonts. If you find TeX expressions that don't parse or render properly, please email mdroe@stsci.edu, but please check KNOWN ISSUES below first. """ from __future__ import division, print_function import os, sys if sys.version_info[0] >= 3: from io import StringIO else: from cStringIO import StringIO from math import ceil try: set except NameError: from sets import Set as set import unicodedata from warnings import warn from numpy import inf, isinf import numpy as np import pyparsing from pyparsing import Combine, Group, Optional, Forward, \ Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \ ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \ FollowedBy, Regex, ParserElement, QuotedString, ParseBaseException # Enable packrat parsing if (sys.version_info[0] >= 3 and [int(x) for x in pyparsing.__version__.split('.')] < [2, 0, 0]): warn("Due to a bug in pyparsing <= 2.0.0 on Python 3.x, packrat parsing " "has been disabled. Mathtext rendering will be much slower as a " "result. Install pyparsing 2.0.0 or later to improve performance.") else: ParserElement.enablePackrat() from matplotlib.afm import AFM from matplotlib.cbook import Bunch, get_realpath_and_stat, \ is_string_like, maxdict from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING from matplotlib.font_manager import findfont, FontProperties from matplotlib._mathtext_data import latex_to_bakoma, \ latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts from matplotlib import get_data_path, rcParams import matplotlib.colors as mcolors import matplotlib._png as _png #################### ############################################################################## # FONTS def get_unicode_index(symbol): """get_unicode_index(symbol) -> integer Return the integer index (from the Unicode table) of symbol. *symbol* can be a single unicode character, a TeX command (i.e. r'\pi'), or a Type1 symbol name (i.e. 'phi'). """ # From UTF #25: U+2212 minus sign is the preferred # representation of the unary and binary minus sign rather than # the ASCII-derived U+002D hyphen-minus, because minus sign is # unambiguous and because it is rendered with a more desirable # length, usually longer than a hyphen. if symbol == '-': return 0x2212 try:# This will succeed if symbol is a single unicode char return ord(symbol) except TypeError: pass try:# Is symbol a TeX symbol (i.e. \alpha) return tex2uni[symbol.strip("\\")] except KeyError: message = """'%(symbol)s' is not a valid Unicode character or TeX/Type1 symbol"""%locals() raise ValueError(message) def unichr_safe(index): """Return the Unicode character corresponding to the index, or the replacement character if this is a narrow build of Python and the requested character is outside the BMP.""" try: return unichr(index) except ValueError: return unichr(0xFFFD) class MathtextBackend(object): """ The base class for the mathtext backend-specific code. 
The purpose of :class:`MathtextBackend` subclasses is to interface between mathtext and a specific matplotlib graphics backend. Subclasses need to override the following: - :meth:`render_glyph` - :meth:`render_filled_rect` - :meth:`get_results` And optionally, if you need to use a Freetype hinting style: - :meth:`get_hinting_type` """ def __init__(self): self.width = 0 self.height = 0 self.depth = 0 def set_canvas_size(self, w, h, d): 'Dimension the drawing canvas' self.width = w self.height = h self.depth = d def render_glyph(self, ox, oy, info): """ Draw a glyph described by *info* to the reference point (*ox*, *oy*). """ raise NotImplementedError() def render_filled_rect(self, x1, y1, x2, y2): """ Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*). """ raise NotImplementedError() def get_results(self, box): """ Return a backend-specific tuple to return to the backend after all processing is done. """ raise NotImplementedError() def get_hinting_type(self): """ Get the Freetype hinting type to use with this particular backend. """ return LOAD_NO_HINTING class MathtextBackendAgg(MathtextBackend): """ Render glyphs and rectangles to an FTImage buffer, which is later transferred to the Agg image by the Agg backend. """ def __init__(self): self.ox = 0 self.oy = 0 self.image = None self.mode = 'bbox' self.bbox = [0, 0, 0, 0] MathtextBackend.__init__(self) def _update_bbox(self, x1, y1, x2, y2): self.bbox = [min(self.bbox[0], x1), min(self.bbox[1], y1), max(self.bbox[2], x2), max(self.bbox[3], y2)] def set_canvas_size(self, w, h, d): MathtextBackend.set_canvas_size(self, w, h, d) if self.mode != 'bbox': self.image = FT2Image(ceil(w), ceil(h + d)) def render_glyph(self, ox, oy, info): if self.mode == 'bbox': self._update_bbox(ox + info.metrics.xmin, oy - info.metrics.ymax, ox + info.metrics.xmax, oy - info.metrics.ymin) else: info.font.draw_glyph_to_bitmap( self.image, ox, oy - info.metrics.iceberg, info.glyph, antialiased=rcParams['text.antialiased']) def render_rect_filled(self, x1, y1, x2, y2): if self.mode == 'bbox': self._update_bbox(x1, y1, x2, y2) else: height = max(int(y2 - y1) - 1, 0) if height == 0: center = (y2 + y1) / 2.0 y = int(center - (height + 1) / 2.0) else: y = int(y1) self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height) def get_results(self, box, used_characters): self.mode = 'bbox' orig_height = box.height orig_depth = box.depth ship(0, 0, box) bbox = self.bbox bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1] self.mode = 'render' self.set_canvas_size( bbox[2] - bbox[0], (bbox[3] - bbox[1]) - orig_depth, (bbox[3] - bbox[1]) - orig_height) ship(-bbox[0], -bbox[1], box) result = (self.ox, self.oy, self.width, self.height + self.depth, self.depth, self.image, used_characters) self.image = None return result def get_hinting_type(self): from matplotlib.backends import backend_agg return backend_agg.get_hinting_flag() class MathtextBackendBitmap(MathtextBackendAgg): def get_results(self, box, used_characters): ox, oy, width, height, depth, image, characters = \ MathtextBackendAgg.get_results(self, box, used_characters) return image, depth class MathtextBackendPs(MathtextBackend): """ Store information to write a mathtext rendering to the PostScript backend. 
""" def __init__(self): self.pswriter = StringIO() self.lastfont = None def render_glyph(self, ox, oy, info): oy = self.height - oy + info.offset postscript_name = info.postscript_name fontsize = info.fontsize symbol_name = info.symbol_name if (postscript_name, fontsize) != self.lastfont: ps = """/%(postscript_name)s findfont %(fontsize)s scalefont setfont """ % locals() self.lastfont = postscript_name, fontsize self.pswriter.write(ps) ps = """%(ox)f %(oy)f moveto /%(symbol_name)s glyphshow\n """ % locals() self.pswriter.write(ps) def render_rect_filled(self, x1, y1, x2, y2): ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1) self.pswriter.write(ps) def get_results(self, box, used_characters): ship(0, 0, box) return (self.width, self.height + self.depth, self.depth, self.pswriter, used_characters) class MathtextBackendPdf(MathtextBackend): """ Store information to write a mathtext rendering to the PDF backend. """ def __init__(self): self.glyphs = [] self.rects = [] def render_glyph(self, ox, oy, info): filename = info.font.fname oy = self.height - oy + info.offset self.glyphs.append( (ox, oy, filename, info.fontsize, info.num, info.symbol_name)) def render_rect_filled(self, x1, y1, x2, y2): self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1)) def get_results(self, box, used_characters): ship(0, 0, box) return (self.width, self.height + self.depth, self.depth, self.glyphs, self.rects, used_characters) class MathtextBackendSvg(MathtextBackend): """ Store information to write a mathtext rendering to the SVG backend. """ def __init__(self): self.svg_glyphs = [] self.svg_rects = [] def render_glyph(self, ox, oy, info): oy = self.height - oy + info.offset self.svg_glyphs.append( (info.font, info.fontsize, info.num, ox, oy, info.metrics)) def render_rect_filled(self, x1, y1, x2, y2): self.svg_rects.append( (x1, self.height - y1 + 1, x2 - x1, y2 - y1)) def get_results(self, box, used_characters): ship(0, 0, box) svg_elements = Bunch(svg_glyphs = self.svg_glyphs, svg_rects = self.svg_rects) return (self.width, self.height + self.depth, self.depth, svg_elements, used_characters) class MathtextBackendPath(MathtextBackend): """ Store information to write a mathtext rendering to the text path machinery. """ def __init__(self): self.glyphs = [] self.rects = [] def render_glyph(self, ox, oy, info): oy = self.height - oy + info.offset thetext = info.num self.glyphs.append( (info.font, info.fontsize, thetext, ox, oy)) def render_rect_filled(self, x1, y1, x2, y2): self.rects.append( (x1, self.height-y2 , x2 - x1, y2 - y1)) def get_results(self, box, used_characters): ship(0, 0, box) return (self.width, self.height + self.depth, self.depth, self.glyphs, self.rects) class MathtextBackendCairo(MathtextBackend): """ Store information to write a mathtext rendering to the Cairo backend. """ def __init__(self): self.glyphs = [] self.rects = [] def render_glyph(self, ox, oy, info): oy = oy - info.offset - self.height thetext = unichr_safe(info.num) self.glyphs.append( (info.font, info.fontsize, thetext, ox, oy)) def render_rect_filled(self, x1, y1, x2, y2): self.rects.append( (x1, y1 - self.height, x2 - x1, y2 - y1)) def get_results(self, box, used_characters): ship(0, 0, box) return (self.width, self.height + self.depth, self.depth, self.glyphs, self.rects) class Fonts(object): """ An abstract base class for a system of fonts to use for mathtext. The class must be able to take symbol keys and font file names and return the character metrics. 
It also delegates to a backend class to do the actual drawing. """ def __init__(self, default_font_prop, mathtext_backend): """ *default_font_prop*: A :class:`~matplotlib.font_manager.FontProperties` object to use for the default non-math font, or the base font for Unicode (generic) font rendering. *mathtext_backend*: A subclass of :class:`MathTextBackend` used to delegate the actual rendering. """ self.default_font_prop = default_font_prop self.mathtext_backend = mathtext_backend self.used_characters = {} def destroy(self): """ Fix any cyclical references before the object is about to be destroyed. """ self.used_characters = None def get_kern(self, font1, fontclass1, sym1, fontsize1, font2, fontclass2, sym2, fontsize2, dpi): """ Get the kerning distance for font between *sym1* and *sym2*. *fontX*: one of the TeX font names:: tt, it, rm, cal, sf, bf or default/regular (non-math) *fontclassX*: TODO *symX*: a symbol in raw TeX form. e.g., '1', 'x' or '\sigma' *fontsizeX*: the fontsize in points *dpi*: the current dots-per-inch """ return 0. def get_metrics(self, font, font_class, sym, fontsize, dpi): """ *font*: one of the TeX font names:: tt, it, rm, cal, sf, bf or default/regular (non-math) *font_class*: TODO *sym*: a symbol in raw TeX form. e.g., '1', 'x' or '\sigma' *fontsize*: font size in points *dpi*: current dots-per-inch Returns an object with the following attributes: - *advance*: The advance distance (in points) of the glyph. - *height*: The height of the glyph in points. - *width*: The width of the glyph in points. - *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph - *iceberg* - the distance from the baseline to the top of the glyph. This corresponds to TeX's definition of "height". """ info = self._get_info(font, font_class, sym, fontsize, dpi) return info.metrics def set_canvas_size(self, w, h, d): """ Set the size of the buffer used to render the math expression. Only really necessary for the bitmap backends. """ self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d) self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth) def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi): """ Draw a glyph at - *ox*, *oy*: position - *facename*: One of the TeX face names - *font_class*: - *sym*: TeX symbol name or single character - *fontsize*: fontsize in points - *dpi*: The dpi to draw at. """ info = self._get_info(facename, font_class, sym, fontsize, dpi) realpath, stat_key = get_realpath_and_stat(info.font.fname) used_characters = self.used_characters.setdefault( stat_key, (realpath, set())) used_characters[1].add(info.num) self.mathtext_backend.render_glyph(ox, oy, info) def render_rect_filled(self, x1, y1, x2, y2): """ Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*). """ self.mathtext_backend.render_rect_filled(x1, y1, x2, y2) def get_xheight(self, font, fontsize, dpi): """ Get the xheight for the given *font* and *fontsize*. """ raise NotImplementedError() def get_underline_thickness(self, font, fontsize, dpi): """ Get the line thickness that matches the given font. Used as a base unit for drawing lines such as in a fraction or radical. """ raise NotImplementedError() def get_used_characters(self): """ Get the set of characters that were used in the math expression. Used by backends that need to subset fonts so they know which glyphs to include. """ return self.used_characters def get_results(self, box): """ Get the data needed by the backend to render the math expression. The return value is backend-specific. 
""" result = self.mathtext_backend.get_results(box, self.get_used_characters()) self.destroy() return result def get_sized_alternatives_for_symbol(self, fontname, sym): """ Override if your font provides multiple sizes of the same symbol. Should return a list of symbols matching *sym* in various sizes. The expression renderer will select the most appropriate size for a given situation from this list. """ return [(fontname, sym)] class TruetypeFonts(Fonts): """ A generic base class for all font setups that use Truetype fonts (through FT2Font). """ class CachedFont: def __init__(self, font): self.font = font self.charmap = font.get_charmap() self.glyphmap = dict( [(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()]) def __repr__(self): return repr(self.font) def __init__(self, default_font_prop, mathtext_backend): Fonts.__init__(self, default_font_prop, mathtext_backend) self.glyphd = {} self._fonts = {} filename = findfont(default_font_prop) default_font = self.CachedFont(FT2Font(str(filename))) self._fonts['default'] = default_font self._fonts['regular'] = default_font def destroy(self): self.glyphd = None Fonts.destroy(self) def _get_font(self, font): if font in self.fontmap: basename = self.fontmap[font] else: basename = font cached_font = self._fonts.get(basename) if cached_font is None: font = FT2Font(str(basename)) cached_font = self.CachedFont(font) self._fonts[basename] = cached_font self._fonts[font.postscript_name] = cached_font self._fonts[font.postscript_name.lower()] = cached_font return cached_font def _get_offset(self, cached_font, glyph, fontsize, dpi): if cached_font.font.postscript_name == 'Cmex10': return ((glyph.height/64.0/2.0) + (fontsize/3.0 * dpi/72.0)) return 0. def _get_info(self, fontname, font_class, sym, fontsize, dpi): key = fontname, font_class, sym, fontsize, dpi bunch = self.glyphd.get(key) if bunch is not None: return bunch cached_font, num, symbol_name, fontsize, slanted = \ self._get_glyph(fontname, font_class, sym, fontsize) font = cached_font.font font.set_size(fontsize, dpi) glyph = font.load_char( num, flags=self.mathtext_backend.get_hinting_type()) xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox] offset = self._get_offset(cached_font, glyph, fontsize, dpi) metrics = Bunch( advance = glyph.linearHoriAdvance/65536.0, height = glyph.height/64.0, width = glyph.width/64.0, xmin = xmin, xmax = xmax, ymin = ymin+offset, ymax = ymax+offset, # iceberg is the equivalent of TeX's "height" iceberg = glyph.horiBearingY/64.0 + offset, slanted = slanted ) result = self.glyphd[key] = Bunch( font = font, fontsize = fontsize, postscript_name = font.postscript_name, metrics = metrics, symbol_name = symbol_name, num = num, glyph = glyph, offset = offset ) return result def get_xheight(self, font, fontsize, dpi): cached_font = self._get_font(font) cached_font.font.set_size(fontsize, dpi) pclt = cached_font.font.get_sfnt_table('pclt') if pclt is None: # Some fonts don't store the xHeight, so we do a poor man's xHeight metrics = self.get_metrics(font, rcParams['mathtext.default'], 'x', fontsize, dpi) return metrics.iceberg xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0) return xHeight def get_underline_thickness(self, font, fontsize, dpi): # This function used to grab underline thickness from the font # metrics, but that information is just too un-reliable, so it # is now hardcoded. 
return ((0.75 / 12.0) * fontsize * dpi) / 72.0 def get_kern(self, font1, fontclass1, sym1, fontsize1, font2, fontclass2, sym2, fontsize2, dpi): if font1 == font2 and fontsize1 == fontsize2: info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi) info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi) font = info1.font return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0 return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1, font2, fontclass2, sym2, fontsize2, dpi) class BakomaFonts(TruetypeFonts): """ Use the Bakoma TrueType fonts for rendering. Symbols are strewn about a number of font files, each of which has its own proprietary 8-bit encoding. """ _fontmap = { 'cal' : 'cmsy10', 'rm' : 'cmr10', 'tt' : 'cmtt10', 'it' : 'cmmi10', 'bf' : 'cmb10', 'sf' : 'cmss10', 'ex' : 'cmex10' } def __init__(self, *args, **kwargs): self._stix_fallback = StixFonts(*args, **kwargs) TruetypeFonts.__init__(self, *args, **kwargs) self.fontmap = {} for key, val in self._fontmap.iteritems(): fullpath = findfont(val) self.fontmap[key] = fullpath self.fontmap[val] = fullpath _slanted_symbols = set(r"\int \oint".split()) def _get_glyph(self, fontname, font_class, sym, fontsize): symbol_name = None if fontname in self.fontmap and sym in latex_to_bakoma: basename, num = latex_to_bakoma[sym] slanted = (basename == "cmmi10") or sym in self._slanted_symbols try: cached_font = self._get_font(basename) except RuntimeError: pass else: symbol_name = cached_font.font.get_glyph_name(num) num = cached_font.glyphmap[num] elif len(sym) == 1: slanted = (fontname == "it") try: cached_font = self._get_font(fontname) except RuntimeError: pass else: num = ord(sym) gid = cached_font.charmap.get(num) if gid is not None: symbol_name = cached_font.font.get_glyph_name( cached_font.charmap[num]) if symbol_name is None: return self._stix_fallback._get_glyph( fontname, font_class, sym, fontsize) return cached_font, num, symbol_name, fontsize, slanted # The Bakoma fonts contain many pre-sized alternatives for the # delimiters. The AutoSizedChar class will use these alternatives # and select the best (closest sized) glyph. 
_size_alternatives = { '(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'), ('ex', '\xb5'), ('ex', '\xc3')], ')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'), ('ex', '\xb6'), ('ex', '\x21')], '{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'), ('ex', '\xbd'), ('ex', '\x28')], '}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'), ('ex', '\xbe'), ('ex', '\x29')], # The fourth size of '[' is mysteriously missing from the BaKoMa # font, so I've ommitted it for both '[' and ']' '[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'), ('ex', '\x22')], ']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'), ('ex', '\x23')], r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'), ('ex', '\xb9'), ('ex', '\x24')], r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'), ('ex', '\xba'), ('ex', '\x25')], r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'), ('ex', '\xbb'), ('ex', '\x26')], r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'), ('ex', '\xbc'), ('ex', '\x27')], r'\langle' : [('ex', '\xad'), ('ex', '\x44'), ('ex', '\xbf'), ('ex', '\x2a')], r'\rangle' : [('ex', '\xae'), ('ex', '\x45'), ('ex', '\xc0'), ('ex', '\x2b')], r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'), ('ex', '\x72'), ('ex', '\x73')], r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'), ('ex', '\xc2'), ('ex', '\x2d')], r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'), ('ex', '\xcb'), ('ex', '\x2c')], r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'), ('ex', '\x64')], r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'), ('ex', '\x67')], r'<' : [('cal', 'h'), ('ex', 'D')], r'>' : [('cal', 'i'), ('ex', 'E')] } for alias, target in [('\leftparen', '('), ('\rightparent', ')'), ('\leftbrace', '{'), ('\rightbrace', '}'), ('\leftbracket', '['), ('\rightbracket', ']'), (r'\{', '{'), (r'\}', '}'), (r'\[', '['), (r'\]', ']')]: _size_alternatives[alias] = _size_alternatives[target] def get_sized_alternatives_for_symbol(self, fontname, sym): return self._size_alternatives.get(sym, [(fontname, sym)]) class UnicodeFonts(TruetypeFonts): """ An abstract base class for handling Unicode fonts. While some reasonably complete Unicode fonts (such as DejaVu) may work in some situations, the only Unicode font I'm aware of with a complete set of math symbols is STIX. This class will "fallback" on the Bakoma fonts when a required symbol can not be found in the font. """ use_cmex = True def __init__(self, *args, **kwargs): # This must come first so the backend's owner is set correctly if rcParams['mathtext.fallback_to_cm']: self.cm_fallback = BakomaFonts(*args, **kwargs) else: self.cm_fallback = None TruetypeFonts.__init__(self, *args, **kwargs) self.fontmap = {} for texfont in "cal rm tt it bf sf".split(): prop = rcParams['mathtext.' 
+ texfont] font = findfont(prop) self.fontmap[texfont] = font prop = FontProperties('cmex10') font = findfont(prop) self.fontmap['ex'] = font _slanted_symbols = set(r"\int \oint".split()) def _map_virtual_font(self, fontname, font_class, uniindex): return fontname, uniindex def _get_glyph(self, fontname, font_class, sym, fontsize): found_symbol = False if self.use_cmex: uniindex = latex_to_cmex.get(sym) if uniindex is not None: fontname = 'ex' found_symbol = True if not found_symbol: try: uniindex = get_unicode_index(sym) found_symbol = True except ValueError: uniindex = ord('?') warn("No TeX to unicode mapping for '%s'" % sym.encode('ascii', 'backslashreplace'), MathTextWarning) fontname, uniindex = self._map_virtual_font( fontname, font_class, uniindex) new_fontname = fontname # Only characters in the "Letter" class should be italicized in 'it' # mode. Greek capital letters should be Roman. if found_symbol: if fontname == 'it': if uniindex < 0x10000: unistring = unichr(uniindex) if (not unicodedata.category(unistring)[0] == "L" or unicodedata.name(unistring).startswith("GREEK CAPITAL")): new_fontname = 'rm' slanted = (new_fontname == 'it') or sym in self._slanted_symbols found_symbol = False try: cached_font = self._get_font(new_fontname) except RuntimeError: pass else: try: glyphindex = cached_font.charmap[uniindex] found_symbol = True except KeyError: pass if not found_symbol: if self.cm_fallback: warn("Substituting with a symbol from Computer Modern.", MathTextWarning) return self.cm_fallback._get_glyph( fontname, 'it', sym, fontsize) else: if fontname in ('it', 'regular') and isinstance(self, StixFonts): return self._get_glyph('rm', font_class, sym, fontsize) warn("Font '%s' does not have a glyph for '%s' [U%x]" % (new_fontname, sym.encode('ascii', 'backslashreplace'), uniindex), MathTextWarning) warn("Substituting with a dummy symbol.", MathTextWarning) fontname = 'rm' new_fontname = fontname cached_font = self._get_font(fontname) uniindex = 0xA4 # currency character, for lack of anything better glyphindex = cached_font.charmap[uniindex] slanted = False symbol_name = cached_font.font.get_glyph_name(glyphindex) return cached_font, uniindex, symbol_name, fontsize, slanted def get_sized_alternatives_for_symbol(self, fontname, sym): if self.cm_fallback: return self.cm_fallback.get_sized_alternatives_for_symbol( fontname, sym) return [(fontname, sym)] class StixFonts(UnicodeFonts): """ A font handling class for the STIX fonts. In addition to what UnicodeFonts provides, this class: - supports "virtual fonts" which are complete alpha numeric character sets with different font styles at special Unicode code points, such as "Blackboard". - handles sized alternative characters for the STIXSizeX fonts. """ _fontmap = { 'rm' : 'STIXGeneral', 'it' : 'STIXGeneral:italic', 'bf' : 'STIXGeneral:weight=bold', 'nonunirm' : 'STIXNonUnicode', 'nonuniit' : 'STIXNonUnicode:italic', 'nonunibf' : 'STIXNonUnicode:weight=bold', 0 : 'STIXGeneral', 1 : 'STIXSizeOneSym', 2 : 'STIXSizeTwoSym', 3 : 'STIXSizeThreeSym', 4 : 'STIXSizeFourSym', 5 : 'STIXSizeFiveSym' } use_cmex = False cm_fallback = False _sans = False def __init__(self, *args, **kwargs): TruetypeFonts.__init__(self, *args, **kwargs) self.fontmap = {} for key, name in self._fontmap.iteritems(): fullpath = findfont(name) self.fontmap[key] = fullpath self.fontmap[name] = fullpath def _map_virtual_font(self, fontname, font_class, uniindex): # Handle these "fonts" that are actually embedded in # other fonts. 
mapping = stix_virtual_fonts.get(fontname) if (self._sans and mapping is None and fontname not in ('regular', 'default')): mapping = stix_virtual_fonts['sf'] doing_sans_conversion = True else: doing_sans_conversion = False if mapping is not None: if isinstance(mapping, dict): mapping = mapping.get(font_class, 'rm') # Binary search for the source glyph lo = 0 hi = len(mapping) while lo < hi: mid = (lo+hi)//2 range = mapping[mid] if uniindex < range[0]: hi = mid elif uniindex <= range[1]: break else: lo = mid + 1 if uniindex >= range[0] and uniindex <= range[1]: uniindex = uniindex - range[0] + range[3] fontname = range[2] elif not doing_sans_conversion: # This will generate a dummy character uniindex = 0x1 fontname = rcParams['mathtext.default'] # Handle private use area glyphs if (fontname in ('it', 'rm', 'bf') and uniindex >= 0xe000 and uniindex <= 0xf8ff): fontname = 'nonuni' + fontname return fontname, uniindex _size_alternatives = {} def get_sized_alternatives_for_symbol(self, fontname, sym): fixes = {'\{': '{', '\}': '}', '\[': '[', '\]': ']'} sym = fixes.get(sym, sym) alternatives = self._size_alternatives.get(sym) if alternatives: return alternatives alternatives = [] try: uniindex = get_unicode_index(sym) except ValueError: return [(fontname, sym)] fix_ups = { ord('<'): 0x27e8, ord('>'): 0x27e9 } uniindex = fix_ups.get(uniindex, uniindex) for i in range(6): cached_font = self._get_font(i) glyphindex = cached_font.charmap.get(uniindex) if glyphindex is not None: alternatives.append((i, unichr_safe(uniindex))) # The largest size of the radical symbol in STIX has incorrect # metrics that cause it to be disconnected from the stem. if sym == r'\__sqrt__': alternatives = alternatives[:-1] self._size_alternatives[sym] = alternatives return alternatives class StixSansFonts(StixFonts): """ A font handling class for the STIX fonts (that uses sans-serif characters by default). """ _sans = True class StandardPsFonts(Fonts): """ Use the standard postscript fonts for rendering to backend_ps Unlike the other font classes, BakomaFont and UnicodeFont, this one requires the Ps backend. 
""" basepath = os.path.join( get_data_path(), 'fonts', 'afm' ) fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery 'rm' : 'pncr8a', # New Century Schoolbook 'tt' : 'pcrr8a', # Courier 'it' : 'pncri8a', # New Century Schoolbook Italic 'sf' : 'phvr8a', # Helvetica 'bf' : 'pncb8a', # New Century Schoolbook Bold None : 'psyr' # Symbol } def __init__(self, default_font_prop): Fonts.__init__(self, default_font_prop, MathtextBackendPs()) self.glyphd = {} self.fonts = {} filename = findfont(default_font_prop, fontext='afm', directory=self.basepath) if filename is None: filename = findfont('Helvetica', fontext='afm', directory=self.basepath) with open(filename, 'r') as fd: default_font = AFM(fd) default_font.fname = filename self.fonts['default'] = default_font self.fonts['regular'] = default_font self.pswriter = StringIO() def _get_font(self, font): if font in self.fontmap: basename = self.fontmap[font] else: basename = font cached_font = self.fonts.get(basename) if cached_font is None: fname = os.path.join(self.basepath, basename + ".afm") with open(fname, 'r') as fd: cached_font = AFM(fd) cached_font.fname = fname self.fonts[basename] = cached_font self.fonts[cached_font.get_fontname()] = cached_font return cached_font def _get_info (self, fontname, font_class, sym, fontsize, dpi): 'load the cmfont, metrics and glyph with caching' key = fontname, sym, fontsize, dpi tup = self.glyphd.get(key) if tup is not None: return tup # Only characters in the "Letter" class should really be italicized. # This class includes greek letters, so we're ok if (fontname == 'it' and (len(sym) > 1 or not unicodedata.category(unicode(sym)).startswith("L"))): fontname = 'rm' found_symbol = False if sym in latex_to_standard: fontname, num = latex_to_standard[sym] glyph = chr(num) found_symbol = True elif len(sym) == 1: glyph = sym num = ord(glyph) found_symbol = True else: warn("No TeX to built-in Postscript mapping for '%s'" % sym, MathTextWarning) slanted = (fontname == 'it') font = self._get_font(fontname) if found_symbol: try: symbol_name = font.get_name_char(glyph) except KeyError: warn("No glyph in standard Postscript font '%s' for '%s'" % (font.postscript_name, sym), MathTextWarning) found_symbol = False if not found_symbol: glyph = sym = '?' 
num = ord(glyph) symbol_name = font.get_name_char(glyph) offset = 0 scale = 0.001 * fontsize xmin, ymin, xmax, ymax = [val * scale for val in font.get_bbox_char(glyph)] metrics = Bunch( advance = font.get_width_char(glyph) * scale, width = font.get_width_char(glyph) * scale, height = font.get_height_char(glyph) * scale, xmin = xmin, xmax = xmax, ymin = ymin+offset, ymax = ymax+offset, # iceberg is the equivalent of TeX's "height" iceberg = ymax + offset, slanted = slanted ) self.glyphd[key] = Bunch( font = font, fontsize = fontsize, postscript_name = font.get_fontname(), metrics = metrics, symbol_name = symbol_name, num = num, glyph = glyph, offset = offset ) return self.glyphd[key] def get_kern(self, font1, fontclass1, sym1, fontsize1, font2, fontclass2, sym2, fontsize2, dpi): if font1 == font2 and fontsize1 == fontsize2: info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi) info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi) font = info1.font return (font.get_kern_dist(info1.glyph, info2.glyph) * 0.001 * fontsize1) return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1, font2, fontclass2, sym2, fontsize2, dpi) def get_xheight(self, font, fontsize, dpi): cached_font = self._get_font(font) return cached_font.get_xheight() * 0.001 * fontsize def get_underline_thickness(self, font, fontsize, dpi): cached_font = self._get_font(font) return cached_font.get_underline_thickness() * 0.001 * fontsize ############################################################################## # TeX-LIKE BOX MODEL # The following is based directly on the document 'woven' from the # TeX82 source code. This information is also available in printed # form: # # Knuth, Donald E.. 1986. Computers and Typesetting, Volume B: # TeX: The Program. Addison-Wesley Professional. # # The most relevant "chapters" are: # Data structures for boxes and their friends # Shipping pages out (Ship class) # Packaging (hpack and vpack) # Data structures for math mode # Subroutines for math mode # Typesetting math formulas # # Many of the docstrings below refer to a numbered "node" in that # book, e.g., node123 # # Note that (as TeX) y increases downward, unlike many other parts of # matplotlib. # How much text shrinks when going to the next-smallest level. GROW_FACTOR # must be the inverse of SHRINK_FACTOR. SHRINK_FACTOR = 0.7 GROW_FACTOR = 1.0 / SHRINK_FACTOR # The number of different sizes of chars to use, beyond which they will not # get any smaller NUM_SIZE_LEVELS = 6 # Percentage of x-height of additional horiz. space after sub/superscripts SCRIPT_SPACE = 0.2 # Percentage of x-height that sub/superscripts drop below the baseline SUBDROP = 0.3 # Percentage of x-height that superscripts drop below the baseline SUP1 = 0.5 # Percentage of x-height that subscripts drop below the baseline SUB1 = 0.0 # Percentage of x-height that superscripts are offset relative to the subscript DELTA = 0.18 class MathTextWarning(Warning): pass class Node(object): """ A node in the TeX box model """ def __init__(self): self.size = 0 def __repr__(self): return self.__internal_repr__() def __internal_repr__(self): return self.__class__.__name__ def get_kerning(self, next): return 0.0 def shrink(self): """ Shrinks one level smaller. There are only three levels of sizes, after which things will no longer get smaller. """ self.size += 1 def grow(self): """ Grows one level larger. There is no limit to how big something can get. 
""" self.size -= 1 def render(self, x, y): pass class Box(Node): """ Represents any node with a physical location. """ def __init__(self, width, height, depth): Node.__init__(self) self.width = width self.height = height self.depth = depth def shrink(self): Node.shrink(self) if self.size < NUM_SIZE_LEVELS: self.width *= SHRINK_FACTOR self.height *= SHRINK_FACTOR self.depth *= SHRINK_FACTOR def grow(self): Node.grow(self) self.width *= GROW_FACTOR self.height *= GROW_FACTOR self.depth *= GROW_FACTOR def render(self, x1, y1, x2, y2): pass class Vbox(Box): """ A box with only height (zero width). """ def __init__(self, height, depth): Box.__init__(self, 0., height, depth) class Hbox(Box): """ A box with only width (zero height and depth). """ def __init__(self, width): Box.__init__(self, width, 0., 0.) class Char(Node): """ Represents a single character. Unlike TeX, the font information and metrics are stored with each :class:`Char` to make it easier to lookup the font metrics when needed. Note that TeX boxes have a width, height, and depth, unlike Type1 and Truetype which use a full bounding box and an advance in the x-direction. The metrics must be converted to the TeX way, and the advance (if different from width) must be converted into a :class:`Kern` node when the :class:`Char` is added to its parent :class:`Hlist`. """ def __init__(self, c, state): Node.__init__(self) self.c = c self.font_output = state.font_output assert isinstance(state.font, (str, unicode, int)) self.font = state.font self.font_class = state.font_class self.fontsize = state.fontsize self.dpi = state.dpi # The real width, height and depth will be set during the # pack phase, after we know the real fontsize self._update_metrics() def __internal_repr__(self): return '`%s`' % self.c def _update_metrics(self): metrics = self._metrics = self.font_output.get_metrics( self.font, self.font_class, self.c, self.fontsize, self.dpi) if self.c == ' ': self.width = metrics.advance else: self.width = metrics.width self.height = metrics.iceberg self.depth = -(metrics.iceberg - metrics.height) def is_slanted(self): return self._metrics.slanted def get_kerning(self, next): """ Return the amount of kerning between this and the given character. Called when characters are strung together into :class:`Hlist` to create :class:`Kern` nodes. """ advance = self._metrics.advance - self.width kern = 0. if isinstance(next, Char): kern = self.font_output.get_kern( self.font, self.font_class, self.c, self.fontsize, next.font, next.font_class, next.c, next.fontsize, self.dpi) return advance + kern def render(self, x, y): """ Render the character to the canvas """ self.font_output.render_glyph( x, y, self.font, self.font_class, self.c, self.fontsize, self.dpi) def shrink(self): Node.shrink(self) if self.size < NUM_SIZE_LEVELS: self.fontsize *= SHRINK_FACTOR self.width *= SHRINK_FACTOR self.height *= SHRINK_FACTOR self.depth *= SHRINK_FACTOR def grow(self): Node.grow(self) self.fontsize *= GROW_FACTOR self.width *= GROW_FACTOR self.height *= GROW_FACTOR self.depth *= GROW_FACTOR class Accent(Char): """ The font metrics need to be dealt with differently for accents, since they are already offset correctly from the baseline in TrueType fonts. 
""" def _update_metrics(self): metrics = self._metrics = self.font_output.get_metrics( self.font, self.font_class, self.c, self.fontsize, self.dpi) self.width = metrics.xmax - metrics.xmin self.height = metrics.ymax - metrics.ymin self.depth = 0 def shrink(self): Char.shrink(self) self._update_metrics() def grow(self): Char.grow(self) self._update_metrics() def render(self, x, y): """ Render the character to the canvas. """ self.font_output.render_glyph( x - self._metrics.xmin, y + self._metrics.ymin, self.font, self.font_class, self.c, self.fontsize, self.dpi) class List(Box): """ A list of nodes (either horizontal or vertical). """ def __init__(self, elements): Box.__init__(self, 0., 0., 0.) self.shift_amount = 0. # An arbitrary offset self.children = elements # The child nodes of this list # The following parameters are set in the vpack and hpack functions self.glue_set = 0. # The glue setting of this list self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching self.glue_order = 0 # The order of infinity (0 - 3) for the glue def __repr__(self): return '[%s <%.02f %.02f %.02f %.02f> %s]' % ( self.__internal_repr__(), self.width, self.height, self.depth, self.shift_amount, ' '.join([repr(x) for x in self.children])) def _determine_order(self, totals): """ A helper function to determine the highest order of glue used by the members of this list. Used by vpack and hpack. """ o = 0 for i in range(len(totals) - 1, 0, -1): if totals[i] != 0.0: o = i break return o def _set_glue(self, x, sign, totals, error_type): o = self._determine_order(totals) self.glue_order = o self.glue_sign = sign if totals[o] != 0.: self.glue_set = x / totals[o] else: self.glue_sign = 0 self.glue_ratio = 0. if o == 0: if len(self.children): warn("%s %s: %r" % (error_type, self.__class__.__name__, self), MathTextWarning) def shrink(self): for child in self.children: child.shrink() Box.shrink(self) if self.size < NUM_SIZE_LEVELS: self.shift_amount *= SHRINK_FACTOR self.glue_set *= SHRINK_FACTOR def grow(self): for child in self.children: child.grow() Box.grow(self) self.shift_amount *= GROW_FACTOR self.glue_set *= GROW_FACTOR class Hlist(List): """ A horizontal list of boxes. """ def __init__(self, elements, w=0., m='additional', do_kern=True): List.__init__(self, elements) if do_kern: self.kern() self.hpack() def kern(self): """ Insert :class:`Kern` nodes between :class:`Char` nodes to set kerning. The :class:`Char` nodes themselves determine the amount of kerning they need (in :meth:`~Char.get_kerning`), and this function just creates the linked list in the correct way. """ new_children = [] num_children = len(self.children) if num_children: for i in range(num_children): elem = self.children[i] if i < num_children - 1: next = self.children[i + 1] else: next = None new_children.append(elem) kerning_distance = elem.get_kerning(next) if kerning_distance != 0.: kern = Kern(kerning_distance) new_children.append(kern) self.children = new_children # This is a failed experiment to fake cross-font kerning. 
# def get_kerning(self, next): # if len(self.children) >= 2 and isinstance(self.children[-2], Char): # if isinstance(next, Char): # print "CASE A" # return self.children[-2].get_kerning(next) # elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char): # print "CASE B" # result = self.children[-2].get_kerning(next.children[0]) # print result # return result # return 0.0 def hpack(self, w=0., m='additional'): """ The main duty of :meth:`hpack` is to compute the dimensions of the resulting boxes, and to adjust the glue if one of those dimensions is pre-specified. The computed sizes normally enclose all of the material inside the new box; but some items may stick out if negative glue is used, if the box is overfull, or if a ``\\vbox`` includes other boxes that have been shifted left. - *w*: specifies a width - *m*: is either 'exactly' or 'additional'. Thus, ``hpack(w, 'exactly')`` produces a box whose width is exactly *w*, while ``hpack(w, 'additional')`` yields a box whose width is the natural width plus *w*. The default values produce a box with the natural width. """ # I don't know why these get reset in TeX. Shift_amount is pretty # much useless if we do. #self.shift_amount = 0. h = 0. d = 0. x = 0. total_stretch = [0.] * 4 total_shrink = [0.] * 4 for p in self.children: if isinstance(p, Char): x += p.width h = max(h, p.height) d = max(d, p.depth) elif isinstance(p, Box): x += p.width if not isinf(p.height) and not isinf(p.depth): s = getattr(p, 'shift_amount', 0.) h = max(h, p.height - s) d = max(d, p.depth + s) elif isinstance(p, Glue): glue_spec = p.glue_spec x += glue_spec.width total_stretch[glue_spec.stretch_order] += glue_spec.stretch total_shrink[glue_spec.shrink_order] += glue_spec.shrink elif isinstance(p, Kern): x += p.width self.height = h self.depth = d if m == 'additional': w += x self.width = w x = w - x if x == 0.: self.glue_sign = 0 self.glue_order = 0 self.glue_ratio = 0. return if x > 0.: self._set_glue(x, 1, total_stretch, "Overfull") else: self._set_glue(x, -1, total_shrink, "Underfull") class Vlist(List): """ A vertical list of boxes. """ def __init__(self, elements, h=0., m='additional'): List.__init__(self, elements) self.vpack() def vpack(self, h=0., m='additional', l=float(inf)): """ The main duty of :meth:`vpack` is to compute the dimensions of the resulting boxes, and to adjust the glue if one of those dimensions is pre-specified. - *h*: specifies a height - *m*: is either 'exactly' or 'additional'. - *l*: a maximum height Thus, ``vpack(h, 'exactly')`` produces a box whose height is exactly *h*, while ``vpack(h, 'additional')`` yields a box whose height is the natural height plus *h*. The default values produce a box with the natural width. """ # I don't know why these get reset in TeX. Shift_amount is pretty # much useless if we do. # self.shift_amount = 0. w = 0. d = 0. x = 0. total_stretch = [0.] * 4 total_shrink = [0.] * 4 for p in self.children: if isinstance(p, Box): x += d + p.height d = p.depth if not isinf(p.width): s = getattr(p, 'shift_amount', 0.) w = max(w, p.width + s) elif isinstance(p, Glue): x += d d = 0. glue_spec = p.glue_spec x += glue_spec.width total_stretch[glue_spec.stretch_order] += glue_spec.stretch total_shrink[glue_spec.shrink_order] += glue_spec.shrink elif isinstance(p, Kern): x += d + p.width d = 0. 
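            # Char nodes are not valid direct children of a Vlist; they must
            # be wrapped in an Hlist first, which is why the branch below
            # raises instead of measuring them.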
elif isinstance(p, Char): raise RuntimeError("Internal mathtext error: Char node found in Vlist.") self.width = w if d > l: x += d - l self.depth = l else: self.depth = d if m == 'additional': h += x self.height = h x = h - x if x == 0: self.glue_sign = 0 self.glue_order = 0 self.glue_ratio = 0. return if x > 0.: self._set_glue(x, 1, total_stretch, "Overfull") else: self._set_glue(x, -1, total_shrink, "Underfull") class Rule(Box): """ A :class:`Rule` node stands for a solid black rectangle; it has *width*, *depth*, and *height* fields just as in an :class:`Hlist`. However, if any of these dimensions is inf, the actual value will be determined by running the rule up to the boundary of the innermost enclosing box. This is called a "running dimension." The width is never running in an :class:`Hlist`; the height and depth are never running in a :class:`Vlist`. """ def __init__(self, width, height, depth, state): Box.__init__(self, width, height, depth) self.font_output = state.font_output def render(self, x, y, w, h): self.font_output.render_rect_filled(x, y, x + w, y + h) class Hrule(Rule): """ Convenience class to create a horizontal rule. """ def __init__(self, state, thickness=None): if thickness is None: thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) height = depth = thickness * 0.5 Rule.__init__(self, inf, height, depth, state) class Vrule(Rule): """ Convenience class to create a vertical rule. """ def __init__(self, state): thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) Rule.__init__(self, thickness, inf, inf, state) class Glue(Node): """ Most of the information in this object is stored in the underlying :class:`GlueSpec` class, which is shared between multiple glue objects. (This is a memory optimization which probably doesn't matter anymore, but it's easier to stick to what TeX does.) """ def __init__(self, glue_type, copy=False): Node.__init__(self) self.glue_subtype = 'normal' if is_string_like(glue_type): glue_spec = GlueSpec.factory(glue_type) elif isinstance(glue_type, GlueSpec): glue_spec = glue_type else: raise ArgumentError("glue_type must be a glue spec name or instance.") if copy: glue_spec = glue_spec.copy() self.glue_spec = glue_spec def shrink(self): Node.shrink(self) if self.size < NUM_SIZE_LEVELS: if self.glue_spec.width != 0.: self.glue_spec = self.glue_spec.copy() self.glue_spec.width *= SHRINK_FACTOR def grow(self): Node.grow(self) if self.glue_spec.width != 0.: self.glue_spec = self.glue_spec.copy() self.glue_spec.width *= GROW_FACTOR class GlueSpec(object): """ See :class:`Glue`. 
""" def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0): self.width = width self.stretch = stretch self.stretch_order = stretch_order self.shrink = shrink self.shrink_order = shrink_order def copy(self): return GlueSpec( self.width, self.stretch, self.stretch_order, self.shrink, self.shrink_order) def factory(cls, glue_type): return cls._types[glue_type] factory = classmethod(factory) GlueSpec._types = { 'fil': GlueSpec(0., 1., 1, 0., 0), 'fill': GlueSpec(0., 1., 2, 0., 0), 'filll': GlueSpec(0., 1., 3, 0., 0), 'neg_fil': GlueSpec(0., 0., 0, 1., 1), 'neg_fill': GlueSpec(0., 0., 0, 1., 2), 'neg_filll': GlueSpec(0., 0., 0, 1., 3), 'empty': GlueSpec(0., 0., 0, 0., 0), 'ss': GlueSpec(0., 1., 1, -1., 1) } # Some convenient ways to get common kinds of glue class Fil(Glue): def __init__(self): Glue.__init__(self, 'fil') class Fill(Glue): def __init__(self): Glue.__init__(self, 'fill') class Filll(Glue): def __init__(self): Glue.__init__(self, 'filll') class NegFil(Glue): def __init__(self): Glue.__init__(self, 'neg_fil') class NegFill(Glue): def __init__(self): Glue.__init__(self, 'neg_fill') class NegFilll(Glue): def __init__(self): Glue.__init__(self, 'neg_filll') class SsGlue(Glue): def __init__(self): Glue.__init__(self, 'ss') class HCentered(Hlist): """ A convenience class to create an :class:`Hlist` whose contents are centered within its enclosing box. """ def __init__(self, elements): Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()], do_kern=False) class VCentered(Hlist): """ A convenience class to create a :class:`Vlist` whose contents are centered within its enclosing box. """ def __init__(self, elements): Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()]) class Kern(Node): """ A :class:`Kern` node has a width field to specify a (normally negative) amount of spacing. This spacing correction appears in horizontal lists between letters like A and V when the font designer said that it looks better to move them closer together or further apart. A kern node can also appear in a vertical list, when its *width* denotes additional spacing in the vertical direction. """ height = 0 depth = 0 def __init__(self, width): Node.__init__(self) self.width = width def __repr__(self): return "k%.02f" % self.width def shrink(self): Node.shrink(self) if self.size < NUM_SIZE_LEVELS: self.width *= SHRINK_FACTOR def grow(self): Node.grow(self) self.width *= GROW_FACTOR class SubSuperCluster(Hlist): """ :class:`SubSuperCluster` is a sort of hack to get around that fact that this code do a two-pass parse like TeX. This lets us store enough information in the hlist itself, namely the nucleus, sub- and super-script, such that if another script follows that needs to be attached, it can be reconfigured on the fly. """ def __init__(self): self.nucleus = None self.sub = None self.super = None Hlist.__init__(self, []) class AutoHeightChar(Hlist): """ :class:`AutoHeightChar` will create a character as close to the given height and depth as possible. When using a font with multiple height versions of some characters (such as the BaKoMa fonts), the correct glyph will be selected, otherwise this will always just return a scaled version of the glyph. 
""" def __init__(self, c, height, depth, state, always=False, factor=None): alternatives = state.font_output.get_sized_alternatives_for_symbol( state.font, c) state = state.copy() target_total = height + depth for fontname, sym in alternatives: state.font = fontname char = Char(sym, state) if char.height + char.depth >= target_total: break if factor is None: factor = target_total / (char.height + char.depth) state.fontsize *= factor char = Char(sym, state) shift = (depth - char.depth) Hlist.__init__(self, [char]) self.shift_amount = shift class AutoWidthChar(Hlist): """ :class:`AutoWidthChar` will create a character as close to the given width as possible. When using a font with multiple width versions of some characters (such as the BaKoMa fonts), the correct glyph will be selected, otherwise this will always just return a scaled version of the glyph. """ def __init__(self, c, width, state, always=False, char_class=Char): alternatives = state.font_output.get_sized_alternatives_for_symbol( state.font, c) state = state.copy() for fontname, sym in alternatives: state.font = fontname char = char_class(sym, state) if char.width >= width: break factor = width / char.width state.fontsize *= factor char = char_class(sym, state) Hlist.__init__(self, [char]) self.width = char.width class Ship(object): """ Once the boxes have been set up, this sends them to output. Since boxes can be inside of boxes inside of boxes, the main work of :class:`Ship` is done by two mutually recursive routines, :meth:`hlist_out` and :meth:`vlist_out`, which traverse the :class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal and vertical boxes. The global variables used in TeX to store state as it processes have become member variables here. """ def __call__(self, ox, oy, box): self.max_push = 0 # Deepest nesting of push commands so far self.cur_s = 0 self.cur_v = 0. self.cur_h = 0. self.off_h = ox self.off_v = oy + box.height self.hlist_out(box) def clamp(value): if value < -1000000000.: return -1000000000. if value > 1000000000.: return 1000000000. return value clamp = staticmethod(clamp) def hlist_out(self, box): cur_g = 0 cur_glue = 0. 
glue_order = box.glue_order glue_sign = box.glue_sign base_line = self.cur_v left_edge = self.cur_h self.cur_s += 1 self.max_push = max(self.cur_s, self.max_push) clamp = self.clamp for p in box.children: if isinstance(p, Char): p.render(self.cur_h + self.off_h, self.cur_v + self.off_v) self.cur_h += p.width elif isinstance(p, Kern): self.cur_h += p.width elif isinstance(p, List): # node623 if len(p.children) == 0: self.cur_h += p.width else: edge = self.cur_h self.cur_v = base_line + p.shift_amount if isinstance(p, Hlist): self.hlist_out(p) else: # p.vpack(box.height + box.depth, 'exactly') self.vlist_out(p) self.cur_h = edge + p.width self.cur_v = base_line elif isinstance(p, Box): # node624 rule_height = p.height rule_depth = p.depth rule_width = p.width if isinf(rule_height): rule_height = box.height if isinf(rule_depth): rule_depth = box.depth if rule_height > 0 and rule_width > 0: self.cur_v = baseline + rule_depth p.render(self.cur_h + self.off_h, self.cur_v + self.off_v, rule_width, rule_height) self.cur_v = baseline self.cur_h += rule_width elif isinstance(p, Glue): # node625 glue_spec = p.glue_spec rule_width = glue_spec.width - cur_g if glue_sign != 0: # normal if glue_sign == 1: # stretching if glue_spec.stretch_order == glue_order: cur_glue += glue_spec.stretch cur_g = round(clamp(float(box.glue_set) * cur_glue)) elif glue_spec.shrink_order == glue_order: cur_glue += glue_spec.shrink cur_g = round(clamp(float(box.glue_set) * cur_glue)) rule_width += cur_g self.cur_h += rule_width self.cur_s -= 1 def vlist_out(self, box): cur_g = 0 cur_glue = 0. glue_order = box.glue_order glue_sign = box.glue_sign self.cur_s += 1 self.max_push = max(self.max_push, self.cur_s) left_edge = self.cur_h self.cur_v -= box.height top_edge = self.cur_v clamp = self.clamp for p in box.children: if isinstance(p, Kern): self.cur_v += p.width elif isinstance(p, List): if len(p.children) == 0: self.cur_v += p.height + p.depth else: self.cur_v += p.height self.cur_h = left_edge + p.shift_amount save_v = self.cur_v p.width = box.width if isinstance(p, Hlist): self.hlist_out(p) else: self.vlist_out(p) self.cur_v = save_v + p.depth self.cur_h = left_edge elif isinstance(p, Box): rule_height = p.height rule_depth = p.depth rule_width = p.width if isinf(rule_width): rule_width = box.width rule_height += rule_depth if rule_height > 0 and rule_depth > 0: self.cur_v += rule_height p.render(self.cur_h + self.off_h, self.cur_v + self.off_v, rule_width, rule_height) elif isinstance(p, Glue): glue_spec = p.glue_spec rule_height = glue_spec.width - cur_g if glue_sign != 0: # normal if glue_sign == 1: # stretching if glue_spec.stretch_order == glue_order: cur_glue += glue_spec.stretch cur_g = round(clamp(float(box.glue_set) * cur_glue)) elif glue_spec.shrink_order == glue_order: # shrinking cur_glue += glue_spec.shrink cur_g = round(clamp(float(box.glue_set) * cur_glue)) rule_height += cur_g self.cur_v += rule_height elif isinstance(p, Char): raise RuntimeError("Internal mathtext error: Char node found in vlist") self.cur_s -= 1 ship = Ship() ############################################################################## # PARSER def Error(msg): """ Helper class to raise parser errors. """ def raise_error(s, loc, toks): raise ParseFatalException(s, loc, msg) empty = Empty() empty.setParseAction(raise_error) return empty class Parser(object): """ This is the pyparsing-based parser for math expressions. It actually parses full strings *containing* math expressions, in that raw text may also appear outside of pairs of ``$``. 
The grammar is based directly on that in TeX, though it cuts a few corners. """ _binary_operators = set(r''' + * \pm \sqcap \rhd \mp \sqcup \unlhd \times \vee \unrhd \div \wedge \oplus \ast \setminus \ominus \star \wr \otimes \circ \diamond \oslash \bullet \bigtriangleup \odot \cdot \bigtriangledown \bigcirc \cap \triangleleft \dagger \cup \triangleright \ddagger \uplus \lhd \amalg'''.split()) _relation_symbols = set(r''' = < > : \leq \geq \equiv \models \prec \succ \sim \perp \preceq \succeq \simeq \mid \ll \gg \asymp \parallel \subset \supset \approx \bowtie \subseteq \supseteq \cong \Join \sqsubset \sqsupset \neq \smile \sqsubseteq \sqsupseteq \doteq \frown \in \ni \propto \vdash \dashv \dots'''.split()) _arrow_symbols = set(r''' \leftarrow \longleftarrow \uparrow \Leftarrow \Longleftarrow \Uparrow \rightarrow \longrightarrow \downarrow \Rightarrow \Longrightarrow \Downarrow \leftrightarrow \longleftrightarrow \updownarrow \Leftrightarrow \Longleftrightarrow \Updownarrow \mapsto \longmapsto \nearrow \hookleftarrow \hookrightarrow \searrow \leftharpoonup \rightharpoonup \swarrow \leftharpoondown \rightharpoondown \nwarrow \rightleftharpoons \leadsto'''.split()) _spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols _punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split()) _overunder_symbols = set(r''' \sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee \bigwedge \bigodot \bigotimes \bigoplus \biguplus '''.split()) _overunder_functions = set( r"lim liminf limsup sup max min".split()) _dropsub_symbols = set(r'''\int \oint'''.split()) _fontnames = set("rm cal it tt sf bf default bb frak circled scr regular".split()) _function_names = set(""" arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan coth inf max tanh""".split()) _ambi_delim = set(r""" | \| / \backslash \uparrow \downarrow \updownarrow \Uparrow \Downarrow \Updownarrow .""".split()) _left_delim = set(r"( [ \{ < \lfloor \langle \lceil".split()) _right_delim = set(r") ] \} > \rfloor \rangle \rceil".split()) def __init__(self): # All forward declarations are here accent = Forward() ambi_delim = Forward() apostrophe = Forward() auto_delim = Forward() binom = Forward() bslash = Forward() c_over_c = Forward() customspace = Forward() end_group = Forward() float_literal = Forward() font = Forward() frac = Forward() function = Forward() genfrac = Forward() group = Forward() int_literal = Forward() latexfont = Forward() lbracket = Forward() left_delim = Forward() lbrace = Forward() main = Forward() math = Forward() math_string = Forward() non_math = Forward() operatorname = Forward() overline = Forward() placeable = Forward() rbrace = Forward() rbracket = Forward() required_group = Forward() right_delim = Forward() right_delim_safe = Forward() simple = Forward() simple_group = Forward() single_symbol = Forward() space = Forward() sqrt = Forward() stackrel = Forward() start_group = Forward() subsuper = Forward() subsuperop = Forward() symbol = Forward() symbol_name = Forward() token = Forward() unknown_symbol = Forward() # Set names on everything -- very useful for debugging for key, val in locals().items(): if key != 'self': val.setName(key) float_literal << Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)") int_literal << Regex("[-+]?[0-9]+") lbrace << Literal('{').suppress() rbrace << Literal('}').suppress() lbracket << Literal('[').suppress() rbracket << Literal(']').suppress() bslash << Literal('\\') space << oneOf(self._space_widths.keys()) customspace 
<< (Suppress(Literal(r'\hspace')) - ((lbrace + float_literal + rbrace) | Error(r"Expected \hspace{n}"))) unicode_range = u"\U00000080-\U0001ffff" single_symbol << Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range) symbol_name << (Combine(bslash + oneOf(tex2uni.keys())) + FollowedBy(Regex("[^A-Za-z]").leaveWhitespace() | StringEnd())) symbol << (single_symbol | symbol_name).leaveWhitespace() apostrophe << Regex("'+") c_over_c << Suppress(bslash) + oneOf(self._char_over_chars.keys()) accent << Group( Suppress(bslash) + oneOf(self._accent_map.keys() + list(self._wide_accents)) - placeable ) function << Suppress(bslash) + oneOf(list(self._function_names)) start_group << Optional(latexfont) + lbrace end_group << rbrace.copy() simple_group << Group(lbrace + ZeroOrMore(token) + rbrace) required_group<< Group(lbrace + OneOrMore(token) + rbrace) group << Group(start_group + ZeroOrMore(token) + end_group) font << Suppress(bslash) + oneOf(list(self._fontnames)) latexfont << Suppress(bslash) + oneOf(['math' + x for x in self._fontnames]) frac << Group( Suppress(Literal(r"\frac")) - ((required_group + required_group) | Error(r"Expected \frac{num}{den}")) ) stackrel << Group( Suppress(Literal(r"\stackrel")) - ((required_group + required_group) | Error(r"Expected \stackrel{num}{den}")) ) binom << Group( Suppress(Literal(r"\binom")) - ((required_group + required_group) | Error(r"Expected \binom{num}{den}")) ) ambi_delim << oneOf(list(self._ambi_delim)) left_delim << oneOf(list(self._left_delim)) right_delim << oneOf(list(self._right_delim)) right_delim_safe << oneOf(list(self._right_delim - set(['}'])) + [r'\}']) genfrac << Group( Suppress(Literal(r"\genfrac")) - (((lbrace + Optional(ambi_delim | left_delim, default='') + rbrace) + (lbrace + Optional(ambi_delim | right_delim_safe, default='') + rbrace) + (lbrace + float_literal + rbrace) + simple_group + required_group + required_group) | Error(r"Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}")) ) sqrt << Group( Suppress(Literal(r"\sqrt")) - ((Optional(lbracket + int_literal + rbracket, default=None) + required_group) | Error("Expected \sqrt{value}")) ) overline << Group( Suppress(Literal(r"\overline")) - (required_group | Error("Expected \overline{value}")) ) unknown_symbol<< Combine(bslash + Regex("[A-Za-z]*")) operatorname << Group( Suppress(Literal(r"\operatorname")) - ((lbrace + ZeroOrMore(simple | unknown_symbol) + rbrace) | Error("Expected \operatorname{value}")) ) placeable << ( accent # Must be first | symbol # Must be second | c_over_c | function | group | frac | stackrel | binom | genfrac | sqrt | overline | operatorname ) simple << ( space | customspace | font | subsuper ) subsuperop << oneOf(["_", "^"]) subsuper << Group( (Optional(placeable) + OneOrMore(subsuperop - placeable) + Optional(apostrophe)) | (placeable + Optional(apostrophe)) | apostrophe ) token << ( simple | auto_delim | unknown_symbol # Must be last ) auto_delim << (Suppress(Literal(r"\left")) - ((left_delim | ambi_delim) | Error("Expected a delimiter")) + Group(ZeroOrMore(simple | auto_delim)) + Suppress(Literal(r"\right")) - ((right_delim | ambi_delim) | Error("Expected a delimiter")) ) math << OneOrMore(token) math_string << QuotedString('$', '\\', unquoteResults=False) non_math << Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace() main << (non_math + ZeroOrMore(math_string + non_math)) + StringEnd() # Set actions for key, val in locals().items(): if hasattr(self, key): val.setParseAction(getattr(self, key)) self._expression = 
main self._math_expression = math def parse(self, s, fonts_object, fontsize, dpi): """ Parse expression *s* using the given *fonts_object* for output, at the given *fontsize* and *dpi*. Returns the parse tree of :class:`Node` instances. """ self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)] self._em_width_cache = {} try: result = self._expression.parseString(s) except ParseBaseException as err: raise ValueError("\n".join([ "", err.line, " " * (err.column - 1) + "^", str(err)])) self._state_stack = None self._em_width_cache = {} self._expression.resetCache() return result[0] # The state of the parser is maintained in a stack. Upon # entering and leaving a group { } or math/non-math, the stack # is pushed and popped accordingly. The current state always # exists in the top element of the stack. class State(object): """ Stores the state of the parser. States are pushed and popped from a stack as necessary, and the "current" state is always at the top of the stack. """ def __init__(self, font_output, font, font_class, fontsize, dpi): self.font_output = font_output self._font = font self.font_class = font_class self.fontsize = fontsize self.dpi = dpi def copy(self): return Parser.State( self.font_output, self.font, self.font_class, self.fontsize, self.dpi) def _get_font(self): return self._font def _set_font(self, name): if name in ('rm', 'it', 'bf'): self.font_class = name self._font = name font = property(_get_font, _set_font) def get_state(self): """ Get the current :class:`State` of the parser. """ return self._state_stack[-1] def pop_state(self): """ Pop a :class:`State` off of the stack. """ self._state_stack.pop() def push_state(self): """ Push a new :class:`State` onto the stack which is just a copy of the current state. """ self._state_stack.append(self.get_state().copy()) def main(self, s, loc, toks): #~ print "finish", toks return [Hlist(toks)] def math_string(self, s, loc, toks): # print "math_string", toks[0][1:-1] return self._math_expression.parseString(toks[0][1:-1]) def math(self, s, loc, toks): #~ print "math", toks hlist = Hlist(toks) self.pop_state() return [hlist] def non_math(self, s, loc, toks): #~ print "non_math", toks s = toks[0].replace(r'\$', '$') symbols = [Char(c, self.get_state()) for c in s] hlist = Hlist(symbols) # We're going into math now, so set font to 'it' self.push_state() self.get_state().font = rcParams['mathtext.default'] return [hlist] def _make_space(self, percentage): # All spaces are relative to em width state = self.get_state() key = (state.font, state.fontsize, state.dpi) width = self._em_width_cache.get(key) if width is None: metrics = state.font_output.get_metrics( state.font, rcParams['mathtext.default'], 'm', state.fontsize, state.dpi) width = metrics.advance self._em_width_cache[key] = width return Kern(width * percentage) _space_widths = { r'\ ' : 0.3, r'\,' : 0.4, r'\;' : 0.8, r'\quad' : 1.6, r'\qquad' : 3.2, r'\!' 
: -0.4, r'\/' : 0.4 } def space(self, s, loc, toks): assert(len(toks)==1) num = self._space_widths[toks[0]] box = self._make_space(num) return [box] def customspace(self, s, loc, toks): return [self._make_space(float(toks[0]))] def symbol(self, s, loc, toks): # print "symbol", toks c = toks[0] try: char = Char(c, self.get_state()) except ValueError: raise ParseFatalException(s, loc, "Unknown symbol: %s" % c) if c in self._spaced_symbols: return [Hlist( [self._make_space(0.2), char, self._make_space(0.2)] , do_kern = False)] elif c in self._punctuation_symbols: return [Hlist( [char, self._make_space(0.2)] , do_kern = False)] return [char] def unknown_symbol(self, s, loc, toks): # print "symbol", toks c = toks[0] raise ParseFatalException(s, loc, "Unknown symbol: %s" % c) _char_over_chars = { # The first 2 entires in the tuple are (font, char, sizescale) for # the two symbols under and over. The third element is the space # (in multiples of underline height) r'AA' : ( ('rm', 'A', 1.0), (None, '\circ', 0.5), 0.0), } def c_over_c(self, s, loc, toks): sym = toks[0] state = self.get_state() thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) under_desc, over_desc, space = \ self._char_over_chars.get(sym, (None, None, 0.0)) if under_desc is None: raise ParseFatalException("Error parsing symbol") over_state = state.copy() if over_desc[0] is not None: over_state.font = over_desc[0] over_state.fontsize *= over_desc[2] over = Accent(over_desc[1], over_state) under_state = state.copy() if under_desc[0] is not None: under_state.font = under_desc[0] under_state.fontsize *= under_desc[2] under = Char(under_desc[1], under_state) width = max(over.width, under.width) over_centered = HCentered([over]) over_centered.hpack(width, 'exactly') under_centered = HCentered([under]) under_centered.hpack(width, 'exactly') return Vlist([ over_centered, Vbox(0., thickness * space), under_centered ]) _accent_map = { r'hat' : r'\circumflexaccent', r'breve' : r'\combiningbreve', r'bar' : r'\combiningoverline', r'grave' : r'\combininggraveaccent', r'acute' : r'\combiningacuteaccent', r'ddot' : r'\combiningdiaeresis', r'tilde' : r'\combiningtilde', r'dot' : r'\combiningdotabove', r'vec' : r'\combiningrightarrowabove', r'"' : r'\combiningdiaeresis', r"`" : r'\combininggraveaccent', r"'" : r'\combiningacuteaccent', r'~' : r'\combiningtilde', r'.' 
: r'\combiningdotabove', r'^' : r'\circumflexaccent', r'overrightarrow' : r'\rightarrow', r'overleftarrow' : r'\leftarrow' } _wide_accents = set(r"widehat widetilde widebar".split()) def accent(self, s, loc, toks): assert(len(toks)==1) state = self.get_state() thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) if len(toks[0]) != 2: raise ParseFatalException("Error parsing accent") accent, sym = toks[0] if accent in self._wide_accents: accent = AutoWidthChar( '\\' + accent, sym.width, state, char_class=Accent) else: accent = Accent(self._accent_map[accent], state) centered = HCentered([accent]) centered.hpack(sym.width, 'exactly') return Vlist([ centered, Vbox(0., thickness * 2.0), Hlist([sym]) ]) def function(self, s, loc, toks): #~ print "function", toks self.push_state() state = self.get_state() state.font = 'rm' hlist = Hlist([Char(c, state) for c in toks[0]]) self.pop_state() hlist.function_name = toks[0] return hlist def operatorname(self, s, loc, toks): self.push_state() state = self.get_state() state.font = 'rm' # Change the font of Chars, but leave Kerns alone for c in toks[0]: if isinstance(c, Char): c.font = 'rm' c._update_metrics() self.pop_state() return Hlist(toks[0]) def start_group(self, s, loc, toks): self.push_state() # Deal with LaTeX-style font tokens if len(toks): self.get_state().font = toks[0][4:] return [] def group(self, s, loc, toks): grp = Hlist(toks[0]) return [grp] required_group = simple_group = group def end_group(self, s, loc, toks): self.pop_state() return [] def font(self, s, loc, toks): assert(len(toks)==1) name = toks[0] self.get_state().font = name return [] def is_overunder(self, nucleus): if isinstance(nucleus, Char): return nucleus.c in self._overunder_symbols elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'): return nucleus.function_name in self._overunder_functions return False def is_dropsub(self, nucleus): if isinstance(nucleus, Char): return nucleus.c in self._dropsub_symbols return False def is_slanted(self, nucleus): if isinstance(nucleus, Char): return nucleus.is_slanted() return False def subsuper(self, s, loc, toks): assert(len(toks)==1) # print 'subsuper', toks nucleus = None sub = None super = None # Pick all of the apostrophe's out napostrophes = 0 new_toks = [] for tok in toks[0]: if isinstance(tok, str) and tok not in ('^', '_'): napostrophes += len(tok) else: new_toks.append(tok) toks = new_toks if len(toks) == 0: assert napostrophes nucleus = Hbox(0.0) elif len(toks) == 1: if not napostrophes: return toks[0] # .asList() else: nucleus = toks[0] elif len(toks) == 2: op, next = toks nucleus = Hbox(0.0) if op == '_': sub = next else: super = next elif len(toks) == 3: nucleus, op, next = toks if op == '_': sub = next else: super = next elif len(toks) == 5: nucleus, op1, next1, op2, next2 = toks if op1 == op2: if op1 == '_': raise ParseFatalException("Double subscript") else: raise ParseFatalException("Double superscript") if op1 == '_': sub = next1 super = next2 else: super = next1 sub = next2 else: raise ParseFatalException( "Subscript/superscript sequence is too long. 
" "Use braces { } to remove ambiguity.") state = self.get_state() rule_thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) xHeight = state.font_output.get_xheight( state.font, state.fontsize, state.dpi) if napostrophes: if super is None: super = Hlist([]) for i in range(napostrophes): super.children.extend(self.symbol(s, loc, ['\prime'])) # Handle over/under symbols, such as sum or integral if self.is_overunder(nucleus): vlist = [] shift = 0. width = nucleus.width if super is not None: super.shrink() width = max(width, super.width) if sub is not None: sub.shrink() width = max(width, sub.width) if super is not None: hlist = HCentered([super]) hlist.hpack(width, 'exactly') vlist.extend([hlist, Kern(rule_thickness * 3.0)]) hlist = HCentered([nucleus]) hlist.hpack(width, 'exactly') vlist.append(hlist) if sub is not None: hlist = HCentered([sub]) hlist.hpack(width, 'exactly') vlist.extend([Kern(rule_thickness * 3.0), hlist]) shift = hlist.height vlist = Vlist(vlist) vlist.shift_amount = shift + nucleus.depth result = Hlist([vlist]) return [result] # Handle regular sub/superscripts shift_up = nucleus.height - SUBDROP * xHeight if self.is_dropsub(nucleus): shift_down = nucleus.depth + SUBDROP * xHeight else: shift_down = SUBDROP * xHeight if super is None: # node757 sub.shrink() x = Hlist([sub]) # x.width += SCRIPT_SPACE * xHeight shift_down = max(shift_down, SUB1) clr = x.height - (abs(xHeight * 4.0) / 5.0) shift_down = max(shift_down, clr) x.shift_amount = shift_down else: super.shrink() x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)]) # x.width += SCRIPT_SPACE * xHeight clr = SUP1 * xHeight shift_up = max(shift_up, clr) clr = x.depth + (abs(xHeight) / 4.0) shift_up = max(shift_up, clr) if sub is None: x.shift_amount = -shift_up else: # Both sub and superscript sub.shrink() y = Hlist([sub]) # y.width += SCRIPT_SPACE * xHeight shift_down = max(shift_down, SUB1 * xHeight) clr = (2.0 * rule_thickness - ((shift_up - x.depth) - (y.height - shift_down))) if clr > 0.: shift_up += clr shift_down += clr if self.is_slanted(nucleus): x.shift_amount = DELTA * (shift_up + shift_down) x = Vlist([x, Kern((shift_up - x.depth) - (y.height - shift_down)), y]) x.shift_amount = shift_down result = Hlist([nucleus, x]) return [result] def _genfrac(self, ldelim, rdelim, rule, style, num, den): state = self.get_state() thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) rule = float(rule) num.shrink() den.shrink() cnum = HCentered([num]) cden = HCentered([den]) width = max(num.width, den.width) cnum.hpack(width, 'exactly') cden.hpack(width, 'exactly') vlist = Vlist([cnum, # numerator Vbox(0, thickness * 2.0), # space Hrule(state, rule), # rule Vbox(0, thickness * 2.0), # space cden # denominator ]) # Shift so the fraction line sits in the middle of the # equals sign metrics = state.font_output.get_metrics( state.font, rcParams['mathtext.default'], '=', state.fontsize, state.dpi) shift = (cden.height - ((metrics.ymax + metrics.ymin) / 2 - thickness * 3.0)) vlist.shift_amount = shift result = [Hlist([vlist, Hbox(thickness * 2.)])] if ldelim or rdelim: if ldelim == '': ldelim = '.' if rdelim == '': rdelim = '.' 
return self._auto_sized_delimiter(ldelim, result, rdelim) return result def genfrac(self, s, loc, toks): assert(len(toks)==1) assert(len(toks[0])==6) return self._genfrac(*tuple(toks[0])) def frac(self, s, loc, toks): assert(len(toks)==1) assert(len(toks[0])==2) state = self.get_state() thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) num, den = toks[0] return self._genfrac('', '', thickness, '', num, den) def stackrel(self, s, loc, toks): assert(len(toks)==1) assert(len(toks[0])==2) num, den = toks[0] return self._genfrac('', '', 0.0, '', num, den) def binom(self, s, loc, toks): assert(len(toks)==1) assert(len(toks[0])==2) num, den = toks[0] return self._genfrac('(', ')', 0.0, '', num, den) def sqrt(self, s, loc, toks): #~ print "sqrt", toks root, body = toks[0] state = self.get_state() thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) # Determine the height of the body, and add a little extra to # the height so it doesn't seem cramped height = body.height - body.shift_amount + thickness * 5.0 depth = body.depth + body.shift_amount check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True) height = check.height - check.shift_amount depth = check.depth + check.shift_amount # Put a little extra space to the left and right of the body padded_body = Hlist([Hbox(thickness * 2.0), body, Hbox(thickness * 2.0)]) rightside = Vlist([Hrule(state), Fill(), padded_body]) # Stretch the glue between the hrule and the body rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0), 'exactly', depth) # Add the root and shift it upward so it is above the tick. # The value of 0.6 is a hard-coded hack ;) if root is None: root = Box(check.width * 0.5, 0., 0.) else: root = Hlist([Char(x, state) for x in root]) root.shrink() root.shrink() root_vlist = Vlist([Hlist([root])]) root_vlist.shift_amount = -height * 0.6 hlist = Hlist([root_vlist, # Root # Negative kerning to put root over tick Kern(-check.width * 0.5), check, # Check rightside]) # Body return [hlist] def overline(self, s, loc, toks): assert(len(toks)==1) assert(len(toks[0])==1) body = toks[0][0] state = self.get_state() thickness = state.font_output.get_underline_thickness( state.font, state.fontsize, state.dpi) height = body.height - body.shift_amount + thickness * 3.0 depth = body.depth + body.shift_amount # Place overline above body rightside = Vlist([Hrule(state), Fill(), Hlist([body])]) # Stretch the glue between the hrule and the body rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0), 'exactly', depth) hlist = Hlist([rightside]) return [hlist] def _auto_sized_delimiter(self, front, middle, back): state = self.get_state() if len(middle): height = max([x.height for x in middle]) depth = max([x.depth for x in middle]) factor = None else: height = 0 depth = 0 factor = 1.0 parts = [] # \left. and \right. 
aren't supposed to produce any symbols if front != '.': parts.append(AutoHeightChar(front, height, depth, state, factor=factor)) parts.extend(middle) if back != '.': parts.append(AutoHeightChar(back, height, depth, state, factor=factor)) hlist = Hlist(parts) return hlist def auto_delim(self, s, loc, toks): #~ print "auto_delim", toks front, middle, back = toks return self._auto_sized_delimiter(front, middle.asList(), back) ### ############################################################################## # MAIN class MathTextParser(object): _parser = None _backend_mapping = { 'bitmap': MathtextBackendBitmap, 'agg' : MathtextBackendAgg, 'ps' : MathtextBackendPs, 'pdf' : MathtextBackendPdf, 'svg' : MathtextBackendSvg, 'path' : MathtextBackendPath, 'cairo' : MathtextBackendCairo, 'macosx': MathtextBackendAgg, } _font_type_mapping = { 'cm' : BakomaFonts, 'stix' : StixFonts, 'stixsans' : StixSansFonts, 'custom' : UnicodeFonts } def __init__(self, output): """ Create a MathTextParser for the given backend *output*. """ self._output = output.lower() self._cache = maxdict(50) def parse(self, s, dpi = 72, prop = None): """ Parse the given math expression *s* at the given *dpi*. If *prop* is provided, it is a :class:`~matplotlib.font_manager.FontProperties` object specifying the "default" font to use in the math expression, used for all non-math text. The results are cached, so multiple calls to :meth:`parse` with the same expression should be fast. """ # There is a bug in Python 3.x where it leaks frame references, # and therefore can't handle this caching if prop is None: prop = FontProperties() cacheKey = (s, dpi, hash(prop)) result = self._cache.get(cacheKey) if result is not None: return result if self._output == 'ps' and rcParams['ps.useafm']: font_output = StandardPsFonts(prop) else: backend = self._backend_mapping[self._output]() fontset = rcParams['mathtext.fontset'] fontset_class = self._font_type_mapping.get(fontset.lower()) if fontset_class is not None: font_output = fontset_class(prop, backend) else: raise ValueError( "mathtext.fontset must be either 'cm', 'stix', " "'stixsans', or 'custom'") fontsize = prop.get_size_in_points() # This is a class variable so we don't rebuild the parser # with each request. if self._parser is None: self.__class__._parser = Parser() box = self._parser.parse(s, font_output, fontsize, dpi) font_output.set_canvas_size(box.width, box.height, box.depth) result = font_output.get_results(box) self._cache[cacheKey] = result return result def to_mask(self, texstr, dpi=120, fontsize=14): """ *texstr* A valid mathtext string, eg r'IQ: $\sigma_i=15$' *dpi* The dots-per-inch to render the text *fontsize* The font size in points Returns a tuple (*array*, *depth*) - *array* is an NxM uint8 alpha ubyte mask array of rasterized tex. - depth is the offset of the baseline from the bottom of the image in pixels. """ assert(self._output=="bitmap") prop = FontProperties(size=fontsize) ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop) x = ftimage.as_array() return x, depth def to_rgba(self, texstr, color='black', dpi=120, fontsize=14): """ *texstr* A valid mathtext string, eg r'IQ: $\sigma_i=15$' *color* Any matplotlib color argument *dpi* The dots-per-inch to render the text *fontsize* The font size in points Returns a tuple (*array*, *depth*) - *array* is an NxM uint8 alpha ubyte mask array of rasterized tex. - depth is the offset of the baseline from the bottom of the image in pixels. 
""" x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize) r, g, b = mcolors.colorConverter.to_rgb(color) RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8) RGBA[:,:,0] = int(255*r) RGBA[:,:,1] = int(255*g) RGBA[:,:,2] = int(255*b) RGBA[:,:,3] = x return RGBA, depth def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14): """ Writes a tex expression to a PNG file. Returns the offset of the baseline from the bottom of the image in pixels. *filename* A writable filename or fileobject *texstr* A valid mathtext string, eg r'IQ: $\sigma_i=15$' *color* A valid matplotlib color argument *dpi* The dots-per-inch to render the text *fontsize* The font size in points Returns the offset of the baseline from the bottom of the image in pixels. """ rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize) numrows, numcols, tmp = rgba.shape _png.write_png(rgba.tostring(), numcols, numrows, filename) return depth def get_depth(self, texstr, dpi=120, fontsize=14): """ Returns the offset of the baseline from the bottom of the image in pixels. *texstr* A valid mathtext string, eg r'IQ: $\sigma_i=15$' *dpi* The dots-per-inch to render the text *fontsize* The font size in points """ assert(self._output=="bitmap") prop = FontProperties(size=fontsize) ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop) return depth def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None): """ Given a math expression, renders it in a closely-clipped bounding box to an image file. *s* A math expression. The math portion should be enclosed in dollar signs. *filename_or_obj* A filepath or writable file-like object to write the image data to. *prop* If provided, a FontProperties() object describing the size and style of the text. *dpi* Override the output dpi, otherwise use the default associated with the output format. *format* The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not provided, will be deduced from the filename. """ from matplotlib import figure # backend_agg supports all of the core output formats from matplotlib.backends import backend_agg if prop is None: prop = FontProperties() parser = MathTextParser('path') width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop) fig = figure.Figure(figsize=(width / 72.0, height / 72.0)) fig.text(0, depth/height, s, fontproperties=prop) backend_agg.FigureCanvasAgg(fig) fig.savefig(filename_or_obj, dpi=dpi, format=format) return depth
unlicense
ngoix/OCRF
examples/svm/plot_oneclass.py
80
2338
""" ========================================== One-class SVM with non-linear kernel (RBF) ========================================== An example using a one-class SVM for novelty detection. :ref:`One-class SVM <svm_outlier_detection>` is an unsupervised algorithm that learns a decision function for novelty detection: classifying new data as similar or different to the training set. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt import matplotlib.font_manager from sklearn import svm xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500)) # Generate train data X = 0.3 * np.random.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * np.random.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) y_pred_train = clf.predict(X_train) y_pred_test = clf.predict(X_test) y_pred_outliers = clf.predict(X_outliers) n_error_train = y_pred_train[y_pred_train == -1].size n_error_test = y_pred_test[y_pred_test == -1].size n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size # plot the line, the points, and the nearest vectors to the plane Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plt.title("Novelty Detection") plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu) a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred') plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred') s = 40 b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s) b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s) c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s) plt.axis('tight') plt.xlim((-5, 5)) plt.ylim((-5, 5)) plt.legend([a.collections[0], b1, b2, c], ["learned frontier", "training observations", "new regular observations", "new abnormal observations"], loc="upper left", prop=matplotlib.font_manager.FontProperties(size=11)) plt.xlabel( "error train: %d/200 ; errors novel regular: %d/40 ; " "errors novel abnormal: %d/40" % (n_error_train, n_error_test, n_error_outliers)) plt.show()
bsd-3-clause
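The plot above is the whole story of the example; for completeness, a minimal sketch of scoring fresh observations with the same estimator (the new points are made up):

import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
X_train = np.r_[0.3 * rng.randn(100, 2) + 2, 0.3 * rng.randn(100, 2) - 2]

clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)

# +1 means "looks like the training data", -1 means "novelty";
# decision_function gives the signed distance to the learned frontier.
new_points = np.array([[2.0, 2.1], [4.5, -4.5]])
print(clf.predict(new_points))
print(clf.decision_function(new_points))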
Jimmy-Morzaria/scikit-learn
sklearn/ensemble/partial_dependence.py
36
14909
"""Partial dependence plots for tree ensembles. """ # Authors: Peter Prettenhofer # License: BSD 3 clause from itertools import count import numbers import numpy as np from scipy.stats.mstats import mquantiles from ..utils.extmath import cartesian from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import map, range, zip from ..utils import check_array from ..tree._tree import DTYPE from ._gradient_boosting import _partial_dependence_tree from .gradient_boosting import BaseGradientBoosting def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100): """Generate a grid of points based on the ``percentiles of ``X``. The grid is generated by placing ``grid_resolution`` equally spaced points between the ``percentiles`` of each column of ``X``. Parameters ---------- X : ndarray The data percentiles : tuple of floats The percentiles which are used to construct the extreme values of the grid axes. grid_resolution : int The number of equally spaced points that are placed on the grid. Returns ------- grid : ndarray All data points on the grid; ``grid.shape[1] == X.shape[1]`` and ``grid.shape[0] == grid_resolution * X.shape[1]``. axes : seq of ndarray The axes with which the grid has been created. """ if len(percentiles) != 2: raise ValueError('percentile must be tuple of len 2') if not all(0. <= x <= 1. for x in percentiles): raise ValueError('percentile values must be in [0, 1]') axes = [] for col in range(X.shape[1]): uniques = np.unique(X[:, col]) if uniques.shape[0] < grid_resolution: # feature has low resolution use unique vals axis = uniques else: emp_percentiles = mquantiles(X, prob=percentiles, axis=0) # create axis based on percentiles and grid resolution axis = np.linspace(emp_percentiles[0, col], emp_percentiles[1, col], num=grid_resolution, endpoint=True) axes.append(axis) return cartesian(axes), axes def partial_dependence(gbrt, target_variables, grid=None, X=None, percentiles=(0.05, 0.95), grid_resolution=100): """Partial dependence of ``target_variables``. Partial dependence plots show the dependence between the joint values of the ``target_variables`` and the function represented by the ``gbrt``. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. target_variables : array-like, dtype=int The target features for which the partial dependecy should be computed (size should be smaller than 3 for visual renderings). grid : array-like, shape=(n_points, len(target_variables)) The grid of ``target_variables`` values for which the partial dependecy should be evaluated (either ``grid`` or ``X`` must be specified). X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. It is used to generate a ``grid`` for the ``target_variables``. The ``grid`` comprises ``grid_resolution`` equally spaced points between the two ``percentiles``. percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used create the extreme values for the ``grid``. Only if ``X`` is not None. grid_resolution : int, default=100 The number of equally spaced points on the ``grid``. Returns ------- pdp : array, shape=(n_classes, n_points) The partial dependence function evaluated on the ``grid``. For regression and binary classification ``n_classes==1``. axes : seq of ndarray or None The axes with which the grid has been created or None if the grid has been given. 
Examples -------- >>> samples = [[0, 0, 2], [1, 0, 0]] >>> labels = [0, 1] >>> from sklearn.ensemble import GradientBoostingClassifier >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels) >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2) >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) """ if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') if gbrt.estimators_.shape[0] == 0: raise ValueError('Call %s.fit before partial_dependence' % gbrt.__class__.__name__) if (grid is None and X is None) or (grid is not None and X is not None): raise ValueError('Either grid or X must be specified') target_variables = np.asarray(target_variables, dtype=np.int32, order='C').ravel() if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]): raise ValueError('target_variables must be in [0, %d]' % (gbrt.n_features - 1)) if X is not None: X = check_array(X, dtype=DTYPE, order='C') grid, axes = _grid_from_X(X[:, target_variables], percentiles, grid_resolution) else: assert grid is not None # dont return axes if grid is given axes = None # grid must be 2d if grid.ndim == 1: grid = grid[:, np.newaxis] if grid.ndim != 2: raise ValueError('grid must be 2d but is %dd' % grid.ndim) grid = np.asarray(grid, dtype=DTYPE, order='C') assert grid.shape[1] == target_variables.shape[0] n_trees_per_stage = gbrt.estimators_.shape[1] n_estimators = gbrt.estimators_.shape[0] pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64, order='C') for stage in range(n_estimators): for k in range(n_trees_per_stage): tree = gbrt.estimators_[stage, k].tree_ _partial_dependence_tree(tree, grid, target_variables, gbrt.learning_rate, pdp[k]) return pdp, axes def plot_partial_dependence(gbrt, X, features, feature_names=None, label=None, n_cols=3, grid_resolution=100, percentiles=(0.05, 0.95), n_jobs=1, verbose=0, ax=None, line_kw=None, contour_kw=None, **fig_kw): """Partial dependence plots for ``features``. The ``len(features)`` plots are arranged in a grid with ``n_cols`` columns. Two-way partial dependence plots are plotted as contour plots. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. features : seq of tuples or ints If seq[i] is an int or a tuple with one int value, a one-way PDP is created; if seq[i] is a tuple of two ints, a two-way PDP is created. feature_names : seq of str Name of each feature; feature_names[i] holds the name of the feature with index i. label : object The class label for which the PDPs should be computed. Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``. n_cols : int The number of columns in the grid plot (default: 3). percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used create the extreme values for the PDP axes. grid_resolution : int, default=100 The number of equally spaced points on the axes. n_jobs : int The number of CPUs to use to compute the PDs. -1 means 'all CPUs'. Defaults to 1. verbose : int Verbose output during PD computations. Defaults to 0. ax : Matplotlib axis object, default None An axis object onto which the plots will be drawn. line_kw : dict Dict with keywords passed to the ``pylab.plot`` call. For one-way partial dependence plots. contour_kw : dict Dict with keywords passed to the ``pylab.plot`` call. For two-way partial dependence plots. 
fig_kw : dict Dict with keywords passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns ------- fig : figure The Matplotlib Figure object. axs : seq of Axis objects A seq of Axis objects, one for each subplot. Examples -------- >>> from sklearn.datasets import make_friedman1 >>> from sklearn.ensemble import GradientBoostingRegressor >>> X, y = make_friedman1() >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP ... """ import matplotlib.pyplot as plt from matplotlib import transforms from matplotlib.ticker import MaxNLocator from matplotlib.ticker import ScalarFormatter if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') if gbrt.estimators_.shape[0] == 0: raise ValueError('Call %s.fit before partial_dependence' % gbrt.__class__.__name__) # set label_idx for multi-class GBRT if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2: if label is None: raise ValueError('label is not given for multi-class PDP') label_idx = np.searchsorted(gbrt.classes_, label) if gbrt.classes_[label_idx] != label: raise ValueError('label %s not in ``gbrt.classes_``' % str(label)) else: # regression and binary classification label_idx = 0 X = check_array(X, dtype=DTYPE, order='C') if gbrt.n_features != X.shape[1]: raise ValueError('X.shape[1] does not match gbrt.n_features') if line_kw is None: line_kw = {'color': 'green'} if contour_kw is None: contour_kw = {} # convert feature_names to list if feature_names is None: # if not feature_names use fx indices as name feature_names = [str(i) for i in range(gbrt.n_features)] elif isinstance(feature_names, np.ndarray): feature_names = feature_names.tolist() def convert_feature(fx): if isinstance(fx, six.string_types): try: fx = feature_names.index(fx) except ValueError: raise ValueError('Feature %s not in feature_names' % fx) return fx # convert features into a seq of int tuples tmp_features = [] for fxs in features: if isinstance(fxs, (numbers.Integral,) + six.string_types): fxs = (fxs,) try: fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32) except TypeError: raise ValueError('features must be either int, str, or tuple ' 'of int/str') if not (1 <= np.size(fxs) <= 2): raise ValueError('target features must be either one or two') tmp_features.append(fxs) features = tmp_features names = [] try: for fxs in features: l = [] # explicit loop so "i" is bound for exception below for i in fxs: l.append(feature_names[i]) names.append(l) except IndexError: raise ValueError('features[i] must be in [0, n_features) ' 'but was %d' % i) # compute PD functions pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(partial_dependence)(gbrt, fxs, X=X, grid_resolution=grid_resolution) for fxs in features) # get global min and max values of PD grouped by plot type pdp_lim = {} for pdp, axes in pd_result: min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max() n_fx = len(axes) old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) min_pd = min(min_pd, old_min_pd) max_pd = max(max_pd, old_max_pd) pdp_lim[n_fx] = (min_pd, max_pd) # create contour levels for two-way plots if 2 in pdp_lim: Z_level = np.linspace(*pdp_lim[2], num=8) if ax is None: fig = plt.figure(**fig_kw) else: fig = ax.get_figure() fig.clear() n_cols = min(n_cols, len(features)) n_rows = int(np.ceil(len(features) / float(n_cols))) axs = [] for i, fx, name, (pdp, axes) in 
zip(count(), features, names, pd_result): ax = fig.add_subplot(n_rows, n_cols, i + 1) if len(axes) == 1: ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw) else: # make contour plot assert len(axes) == 2 XX, YY = np.meshgrid(axes[0], axes[1]) Z = pdp[label_idx].reshape(list(map(np.size, axes))).T CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors='k') ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1], vmin=Z_level[0], alpha=0.75, **contour_kw) ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True) # plot data deciles + axes labels deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) ylim = ax.get_ylim() ax.vlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_xlabel(name[0]) ax.set_ylim(ylim) # prevent x-axis ticks from overlapping ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower')) tick_formatter = ScalarFormatter() tick_formatter.set_powerlimits((-3, 4)) ax.xaxis.set_major_formatter(tick_formatter) if len(axes) > 1: # two-way PDP - y-axis deciles + labels deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) xlim = ax.get_xlim() ax.hlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_ylabel(name[1]) # hline erases xlim ax.set_xlim(xlim) else: ax.set_ylabel('Partial dependence') if len(axes) == 1: ax.set_ylim(pdp_lim[1]) axs.append(ax) fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4, hspace=0.3) return fig, axs
bsd-3-clause
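A short sketch of how the two public helpers in that module are typically used together, following its own doctest examples (the dataset and estimator below are illustrative):

from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import (partial_dependence,
                                                 plot_partial_dependence)

X, y = make_friedman1(n_samples=200, random_state=0)
est = GradientBoostingRegressor(n_estimators=50, random_state=0).fit(X, y)

# Raw partial dependence values of feature 0 on a 50-point grid spanning
# the 5th-95th percentiles of that column (the defaults in _grid_from_X).
pdp, axes = partial_dependence(est, [0], X=X, grid_resolution=50)
print(pdp.shape, len(axes))

# One-way plot for feature 0 plus a two-way contour for features (0, 1).
fig, axs = plot_partial_dependence(est, X, [0, (0, 1)], grid_resolution=50)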
gautam1858/tensorflow
tensorflow/examples/get_started/regression/imports85.py
41
6589
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A dataset loader for imports85.data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np import tensorflow as tf try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: pass URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data" # Order is important for the csv-readers, so we use an OrderedDict here. defaults = collections.OrderedDict([ ("symboling", [0]), ("normalized-losses", [0.0]), ("make", [""]), ("fuel-type", [""]), ("aspiration", [""]), ("num-of-doors", [""]), ("body-style", [""]), ("drive-wheels", [""]), ("engine-location", [""]), ("wheel-base", [0.0]), ("length", [0.0]), ("width", [0.0]), ("height", [0.0]), ("curb-weight", [0.0]), ("engine-type", [""]), ("num-of-cylinders", [""]), ("engine-size", [0.0]), ("fuel-system", [""]), ("bore", [0.0]), ("stroke", [0.0]), ("compression-ratio", [0.0]), ("horsepower", [0.0]), ("peak-rpm", [0.0]), ("city-mpg", [0.0]), ("highway-mpg", [0.0]), ("price", [0.0]) ]) # pyformat: disable types = collections.OrderedDict((key, type(value[0])) for key, value in defaults.items()) def _get_imports85(): path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL) return path def dataset(y_name="price", train_fraction=0.7): """Load the imports85 data as a (train,test) pair of `Dataset`. Each dataset generates (features_dict, label) pairs. Args: y_name: The name of the column to use as the label. train_fraction: A float, the fraction of data to use for training. The remainder will be used for evaluation. Returns: A (train,test) pair of `Datasets` """ # Download and cache the data path = _get_imports85() # Define how the lines of the file should be parsed def decode_line(line): """Convert a csv line into a (features_dict,label) pair.""" # Decode the line to a tuple of items based on the types of # csv_header.values(). items = tf.decode_csv(line, list(defaults.values())) # Convert the keys and items to a dict. pairs = zip(defaults.keys(), items) features_dict = dict(pairs) # Remove the label from the features_dict label = features_dict.pop(y_name) return features_dict, label def has_no_question_marks(line): """Returns True if the line of text has no question marks.""" # split the line into an array of characters chars = tf.string_split(line[tf.newaxis], "").values # for each character check if it is a question mark is_question = tf.equal(chars, "?") any_question = tf.reduce_any(is_question) no_question = ~any_question return no_question def in_training_set(line): """Returns a boolean tensor, true if the line is in the training set.""" # If you randomly split the dataset you won't get the same split in both # sessions if you stop and restart training later. 
Also a simple # random split won't work with a dataset that's too big to `.cache()` as # we are doing here. num_buckets = 1000000 bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets) # Use the hash bucket id as a random number that's deterministic per example return bucket_id < int(train_fraction * num_buckets) def in_test_set(line): """Returns a boolean tensor, true if the line is in the training set.""" # Items not in the training set are in the test set. # This line must use `~` instead of `not` because `not` only works on python # booleans but we are dealing with symbolic tensors. return ~in_training_set(line) base_dataset = ( tf.data # Get the lines from the file. .TextLineDataset(path) # drop lines with question marks. .filter(has_no_question_marks)) train = (base_dataset # Take only the training-set lines. .filter(in_training_set) # Decode each line into a (features_dict, label) pair. .map(decode_line) # Cache data so you only decode the file once. .cache()) # Do the same for the test-set. test = (base_dataset.filter(in_test_set).cache().map(decode_line)) return train, test def raw_dataframe(): """Load the imports85 data as a pd.DataFrame.""" # Download and cache the data path = _get_imports85() # Load it into a pandas dataframe df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?") return df def load_data(y_name="price", train_fraction=0.7, seed=None): """Get the imports85 data set. A description of the data is available at: https://archive.ics.uci.edu/ml/datasets/automobile The data itself can be found at: https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data Args: y_name: the column to return as the label. train_fraction: the fraction of the dataset to use for training. seed: The random seed to use when shuffling the data. `None` generates a unique shuffle every run. Returns: a pair of pairs where the first pair is the training data, and the second is the test data: `(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)` `x` contains a pandas DataFrame of features, while `y` contains the label array. """ # Load the raw data columns. data = raw_dataframe() # Delete rows with unknowns data = data.dropna() # Shuffle the data np.random.seed(seed) # Split the data into train/test subsets. x_train = data.sample(frac=train_fraction, random_state=seed) x_test = data.drop(x_train.index) # Extract the label from the features dataframe. y_train = x_train.pop(y_name) y_test = x_test.pop(y_name) return (x_train, y_train), (x_test, y_test)
apache-2.0
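A small sketch of consuming the loaders defined above; the pandas path mirrors the module's own docstring (the file is assumed to be importable as imports85):

import imports85  # the module listed above, assumed to be on the path

# pandas route: (features, label) pairs split by train_fraction.
(x_train, y_train), (x_test, y_test) = imports85.load_data(
    y_name="price", train_fraction=0.7, seed=0)
print(x_train.shape, y_train.shape)

# tf.data route: each dataset element is a (features_dict, label) pair,
# already filtered of rows containing '?' and split deterministically.
train_ds, test_ds = imports85.dataset(y_name="price")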
endlessm/chromium-browser
third_party/catapult/trace_processor/experimental/visualize_traces/visualize_traces.py
7
3307
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import json
import os
import sys

from ggplot import *
import pandas


def _ConvertToSimplifiedFormat(values_list):
  data = {}
  for trace_name, result_data in values_list:
    result_values = result_data['pairs']['values']
    values = {}
    for current_value in result_values:
      grouping_key = current_value['name']
      if current_value['type'] == 'numeric':
        numeric_value = current_value['numeric']
        if numeric_value['type'] == 'scalar':
          values[grouping_key] = [current_value['numeric']['value']]
        elif numeric_value['type'] == 'numeric':
          # Let's just skip histograms for now.
          histogram_values = []
          for bin in numeric_value['centralBins']:
            histo_mid = (bin['max'] - bin['min']) * 0.5
            histogram_values += [histo_mid for _ in xrange(bin['count'])]
          values[grouping_key] = histogram_values
    data[trace_name] = values
  return data


def _ReadValuesOutput(file_name, metric_names):
  with open(file_name, 'r') as f:
    results_list = f.read()
  try:
    # Try metrics format first, which is a dict of results.
    results_together = json.loads(results_list).iteritems()
  except ValueError:
    # Try flume pipeline output format, which is a 1 result per line.
    r = [json.loads(s) for s in results_list.splitlines()]
    results_together = dict([(str(i), r[i]) for i in xrange(len(r))])
  simplified_data = _ConvertToSimplifiedFormat(results_together)
  return _ConvertValuesToPD(simplified_data, metric_names)


def _ConvertValuesToPD(all_values_data, metric_names):
  pd_dict = {}
  for trace_name, values in all_values_data.iteritems():
    for metric_name, metric_value in values.iteritems():
      if not metric_name in metric_names:
        continue
      if not metric_name in pd_dict:
        pd_dict[metric_name] = []
      pd_dict[metric_name].extend(metric_value)
  return pandas.DataFrame(pd_dict)


def _DoGGPlot(graph_type, data, x, y):
  if graph_type == 'histogram':
    print ggplot(aes(x=x), data=data) + geom_histogram()
  elif graph_type == 'point':
    print (ggplot(aes(x=x, y=y), data=data) +
           geom_point() +
           stat_smooth(color='black', se=True))


def Main():
  # Parse options.
  parser = argparse.ArgumentParser()
  parser.add_argument('--x', help='X-Axis parameter.')
  parser.add_argument('--y', help='Y-Axis parameter.')
  parser.add_argument('--graph-type', choices=['histogram', 'point'],
                      help='Type of graph.')
  parser.add_argument('--source',
                      help='Path to file containing results of metrics run.')
  args = parser.parse_args()

  if not args.source:
    parser.error('Source file not specified. Use --source to specify '
                 'path to source file.')
  if not args.graph_type:
    parser.error('Graph type not specified. Use --graph-type.')

  metric_names = []
  if args.x:
    metric_names.append(args.x)
  if args.y:
    metric_names.append(args.y)

  data = _ReadValuesOutput(os.path.abspath(args.source), metric_names)

  _DoGGPlot(args.graph_type, data, args.x, args.y)
bsd-3-clause
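For orientation, a tiny stand-alone sketch of the intermediate shape those helpers pass around: per-trace metric values flattened into a pandas DataFrame (the dict literal is invented to mirror _ConvertToSimplifiedFormat's output):

import pandas

# {trace_name: {metric_name: [values]}} -- what _ConvertToSimplifiedFormat
# produces from the raw metrics JSON.
simplified = {
    'trace_a': {'frame_time': [16.6, 17.1]},
    'trace_b': {'frame_time': [33.0, 32.4]},
}

# Equivalent of _ConvertValuesToPD for a single requested metric.
pd_dict = {}
for trace_name, values in simplified.items():
    for metric_name, metric_value in values.items():
        pd_dict.setdefault(metric_name, []).extend(metric_value)
print(pandas.DataFrame(pd_dict))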
alexandreday/fast_density_clustering
example/example2.py
1
2896
'''
Created on Feb 4, 2017

@author: Alexandre Day

Perform density clustering on some datasets found in the sklearn
documentation on clustering
(http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html)
'''

import time

import numpy as np
import matplotlib.pyplot as plt

from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from fdc import FDC, plotting

np.random.seed(0)

# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
                                      noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None

# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)

# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
                             cluster_std=[1.0, 2.5, 0.5],
                             random_state=random_state)

colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)

plot_num = 1
fig = plt.figure(figsize=(7, 10))

#################################
#################################
#################################
#################################

""" Setting FDC parameters (note these are the same across all datasets) """

datasets = [noisy_circles, noisy_moons, varied, aniso, blobs, no_structure]

for i_dataset, dataset in enumerate(datasets):
    X, y = dataset
    # normalize dataset for easier parameter selection
    X = StandardScaler().fit_transform(X)

    # create clustering estimators
    # atol and rtol set the precision of the density map, higher value
    # improves performance but reduces accuracy
    model = FDC(eta=0.4)

    s = time.time()
    model.fit(X)
    dt = time.time() - s

    n_center = len(model.idx_centers)

    plt.subplot(3, 2, plot_num)
    plt.scatter(X[:, 0], X[:, 1],
                color=colors[model.cluster_label].tolist(), s=10, zorder=1)
    plt.text(.99, .07, ('%.2fs' % (dt)).lstrip('0'),
             transform=plt.gca().transAxes, size=15,
             horizontalalignment='right', zorder=2,
             bbox=dict(facecolor='white', edgecolor='black',
                       boxstyle='round,pad=0.1'))
    plot_num += 1

plt.suptitle("Local density clustering with %s kernels \n Number of data points = %i" % (model.kernel, n_samples))
plt.savefig("sklearn_datasets.png")
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
bsd-3-clause
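A stripped-down sketch of the FDC call pattern used inside the loop above, on a single blob dataset (the attribute names are the ones the example itself reads; fdc is a third-party package and must be installed):

import time

import numpy as np
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from fdc import FDC  # third-party density-clustering package used above

X, _ = datasets.make_blobs(n_samples=1500, random_state=8)
X = StandardScaler().fit_transform(X)

model = FDC(eta=0.4)          # same parameter as in the example
start = time.time()
model.fit(X)
elapsed = time.time() - start

# Attributes the example relies on: density-peak indices and labels.
print("clusters found:", len(model.idx_centers))
print("first labels:", model.cluster_label[:10], "(%.2fs)" % elapsed)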
ice3/VAST_2015
Challenge1/analysis/test_pandas.py
1
1309
import sqlite3

import numpy as np  # needed below for np.zeros / np.int
import pandas
from matplotlib import pylab as plt


def get_data(fname, table, nb_lines):
    conn = sqlite3.connect(fname)
    sql_request = "SELECT * FROM {} LIMIT {}".format(table, nb_lines)
    df = pandas.read_sql(sql_request, conn, parse_dates="time",
                         index_col='time')
    return df  # .set_index('time')

df = get_data("./saturday.db", "spatial_data", 500000)

# # resample - aggregate
# bars = ticks.Price.resample('1min', how='ohlc')
# # indexing
# vwap.ix['2011-11-01 09:27':'2011-11-01 09:32']
# vwap.between_time('10:00', '16:00')  # for the same day
# # or
# bars.open.at_time('9:30')


def create_heatmaps(df, key=lambda t: t.minute):
    for group, data in df.groupby(df.index.map(key)):
        all_mat = np.zeros((100, 100), dtype=np.int)
        for x, y in zip(data.x, data.y):
            all_mat[x, y] += 1
        all_mat = all_mat * 1.0 / len(data)
        plt.matshow(all_mat)
        plt.title(data.ix[0].name)
        print("saving: ", group)
        plt.savefig("{:02}.png".format(group))

create_heatmaps(df)


def create_path_id(df, ids):
    def getId(i):
        return df[df.id == i]
    for id_ in ids:
        plt.scatter(getId(id_).x, getId(id_).y, label=str(id_))
    plt.legend()

id_unique = df.id.unique()
create_path_id(df, id_unique[:10])

# NOTE: df2 is not defined in this script; it is presumably a second frame
# loaded the same way from a table that carries a 'status' column.
plt.plot(df2[df2.status == "check-in"].x, df2[df2.status == "check-in"].y)
plt.show()
gpl-2.0
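The commented block in that script hints at pandas time-series resampling; a self-contained version of the same idiom on synthetic tick data, using the current resample API rather than the deprecated how= keyword:

import numpy as np
import pandas as pd

# Fake one hour of per-second "ticks".
idx = pd.date_range('2014-06-06 09:00', periods=3600, freq='S')
ticks = pd.DataFrame(
    {'Price': 100 + 0.01 * np.random.randn(3600).cumsum()}, index=idx)

# Aggregate to one-minute OHLC bars, then slice a time-of-day window.
bars = ticks['Price'].resample('1min').ohlc()
print(bars.between_time('09:10', '09:15'))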
MikeOuimet/AI-fun
RLbasics/dieN.py
1
2257
import numpy as np
import matplotlib.pyplot as plt

'''Basic TD(0) implementation illustrated on a die rolling problem where you
get reward equal to die roll unless roll a bad number, which makes you lose
everything collected. Actions are to 0 - keep rolling, 1- keep the money
you've collected and stop playing
'''

# Parameters
N = 10
e1 = .1
alpha = .1
gamma = 0.9
bad = [10]
endstate = 1000

Q = np.zeros((endstate, 2))  # TD(0)
state = 0


# epsilon-greedy
def epsilongreedy(Qf, statef, epsilonf):
    if np.random.random() > epsilonf:
        a = Qf[statef, :].argmax()
    else:
        #args = Qf[statef, :].argsort()
        #a = args[0]
        a = np.random.randint(0, 2)
    return a


# 1 = stop, 0 = bet
def environment(statef, a, bad):
    if a == 0:
        nature = np.random.randint(1, N+1)
        if nature in bad:
            r = -state
            goon = False
            ns = endstate-1
        else:
            r = nature
            goon = True
            ns = statef + nature
    else:
        r = 0
        goon = False
        ns = endstate-1
    return r, goon, ns


# + gamma*potential[newstate] - potential[state]
def td0(Q, s, a, r, ns):
    Qnew = Q.copy()
    Qnew[s, a] = Q[s, a] + alpha*(r + gamma*(Q[ns, :].max()) - Q[s, a])
    return Qnew


go_on = True
nruns = 50000
for run in range(nruns):
    eps = e1
    while go_on:
        action = epsilongreedy(Q, state, eps)
        reward, go_on, newstate = environment(state, action, bad)
        Qnew = td0(Q, state, action, reward, newstate)
        #if run % 10000 == 0:
        #    print run
        #    print 'The state is %r and action is %r and prior Q is (%r %r)' % (state, action, Q[state, 0], Q[state, 1])
        #    print 'The reward is %r and the new state is %r' % (reward, newstate)
        #    print 'The Q of the new state %r is (%r %r)' % (newstate, Q[newstate, 0], Q[newstate, 1])
        #    print 'The updated Q of state %r is (%r %r)' % (state, Qnew[state, 0], Qnew[state, 1])
        #    print ''
        Q = Qnew
        state = newstate
    go_on = True
    state = np.random.randint(0, 6)

printn = 100
states = np.linspace(0, printn, printn+1)
plt.plot(states, Q[0:printn+1, 1])
plt.plot(states, Q[0:printn+1, 0])
plt.show()
mit
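The heart of the script above is the update inside td0(); a tiny self-contained check of that rule on one hand-built transition (the numbers are illustrative):

import numpy as np

alpha, gamma = 0.1, 0.9
Q = np.zeros((5, 2))

# One transition: in state 0 we roll (action 0), collect reward 4, land in
# state 4.  The bootstrapped TD(0) target is r + gamma * max_a' Q[ns, a'].
s, a, r, ns = 0, 0, 4.0, 4
target = r + gamma * Q[ns, :].max()
Q[s, a] += alpha * (target - Q[s, a])
print(Q[s, a])  # 0 + 0.1 * (4.0 - 0.0) = 0.4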
nabilbendafi/script.module.pydevd
lib/pydev_ipython/inputhook.py
52
18411
# coding: utf-8 """ Inputhook management for GUI event loop integration. """ #----------------------------------------------------------------------------- # Copyright (C) 2008-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import sys import select #----------------------------------------------------------------------------- # Constants #----------------------------------------------------------------------------- # Constants for identifying the GUI toolkits. GUI_WX = 'wx' GUI_QT = 'qt' GUI_QT4 = 'qt4' GUI_GTK = 'gtk' GUI_TK = 'tk' GUI_OSX = 'osx' GUI_GLUT = 'glut' GUI_PYGLET = 'pyglet' GUI_GTK3 = 'gtk3' GUI_NONE = 'none' # i.e. disable #----------------------------------------------------------------------------- # Utilities #----------------------------------------------------------------------------- def ignore_CTRL_C(): """Ignore CTRL+C (not implemented).""" pass def allow_CTRL_C(): """Take CTRL+C into account (not implemented).""" pass #----------------------------------------------------------------------------- # Main InputHookManager class #----------------------------------------------------------------------------- class InputHookManager(object): """Manage PyOS_InputHook for different GUI toolkits. This class installs various hooks under ``PyOSInputHook`` to handle GUI event loop integration. """ def __init__(self): self._return_control_callback = None self._apps = {} self._reset() self.pyplot_imported = False def _reset(self): self._callback_pyfunctype = None self._callback = None self._current_gui = None def set_return_control_callback(self, return_control_callback): self._return_control_callback = return_control_callback def get_return_control_callback(self): return self._return_control_callback def return_control(self): return self._return_control_callback() def get_inputhook(self): return self._callback def set_inputhook(self, callback): """Set inputhook to callback.""" # We don't (in the context of PyDev console) actually set PyOS_InputHook, but rather # while waiting for input on xmlrpc we run this code self._callback = callback def clear_inputhook(self, app=None): """Clear input hook. Parameters ---------- app : optional, ignored This parameter is allowed only so that clear_inputhook() can be called with a similar interface as all the ``enable_*`` methods. But the actual value of the parameter is ignored. This uniform interface makes it easier to have user-level entry points in the main IPython app like :meth:`enable_gui`.""" self._reset() def clear_app_refs(self, gui=None): """Clear IPython's internal reference to an application instance. Whenever we create an app for a user on qt4 or wx, we hold a reference to the app. This is needed because in some cases bad things can happen if a user doesn't hold a reference themselves. This method is provided to clear the references we are holding. Parameters ---------- gui : None or str If None, clear all app references. If ('wx', 'qt4') clear the app for that toolkit. References are not held for gtk or tk as those toolkits don't have the notion of an app. 
""" if gui is None: self._apps = {} elif gui in self._apps: del self._apps[gui] def enable_wx(self, app=None): """Enable event loop integration with wxPython. Parameters ---------- app : WX Application, optional. Running application to use. If not given, we probe WX for an existing application object, and create a new one if none is found. Notes ----- This methods sets the ``PyOS_InputHook`` for wxPython, which allows the wxPython to integrate with terminal based applications like IPython. If ``app`` is not given we probe for an existing one, and return it if found. If no existing app is found, we create an :class:`wx.App` as follows:: import wx app = wx.App(redirect=False, clearSigInt=False) """ import wx from distutils.version import LooseVersion as V wx_version = V(wx.__version__).version if wx_version < [2, 8]: raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__) from pydev_ipython.inputhookwx import inputhook_wx self.set_inputhook(inputhook_wx) self._current_gui = GUI_WX if app is None: app = wx.GetApp() if app is None: app = wx.App(redirect=False, clearSigInt=False) app._in_event_loop = True self._apps[GUI_WX] = app return app def disable_wx(self): """Disable event loop integration with wxPython. This merely sets PyOS_InputHook to NULL. """ if GUI_WX in self._apps: self._apps[GUI_WX]._in_event_loop = False self.clear_inputhook() def enable_qt4(self, app=None): """Enable event loop integration with PyQt4. Parameters ---------- app : Qt Application, optional. Running application to use. If not given, we probe Qt for an existing application object, and create a new one if none is found. Notes ----- This methods sets the PyOS_InputHook for PyQt4, which allows the PyQt4 to integrate with terminal based applications like IPython. If ``app`` is not given we probe for an existing one, and return it if found. If no existing app is found, we create an :class:`QApplication` as follows:: from PyQt4 import QtCore app = QtGui.QApplication(sys.argv) """ from pydev_ipython.inputhookqt4 import create_inputhook_qt4 app, inputhook_qt4 = create_inputhook_qt4(self, app) self.set_inputhook(inputhook_qt4) self._current_gui = GUI_QT4 app._in_event_loop = True self._apps[GUI_QT4] = app return app def disable_qt4(self): """Disable event loop integration with PyQt4. This merely sets PyOS_InputHook to NULL. """ if GUI_QT4 in self._apps: self._apps[GUI_QT4]._in_event_loop = False self.clear_inputhook() def enable_gtk(self, app=None): """Enable event loop integration with PyGTK. Parameters ---------- app : ignored Ignored, it's only a placeholder to keep the call signature of all gui activation methods consistent, which simplifies the logic of supporting magics. Notes ----- This methods sets the PyOS_InputHook for PyGTK, which allows the PyGTK to integrate with terminal based applications like IPython. """ from pydev_ipython.inputhookgtk import create_inputhook_gtk self.set_inputhook(create_inputhook_gtk(self._stdin_file)) self._current_gui = GUI_GTK def disable_gtk(self): """Disable event loop integration with PyGTK. This merely sets PyOS_InputHook to NULL. """ self.clear_inputhook() def enable_tk(self, app=None): """Enable event loop integration with Tk. Parameters ---------- app : toplevel :class:`Tkinter.Tk` widget, optional. Running toplevel widget to use. If not given, we probe Tk for an existing one, and create a new one if none is found. 
Notes ----- If you have already created a :class:`Tkinter.Tk` object, the only thing done by this method is to register with the :class:`InputHookManager`, since creating that object automatically sets ``PyOS_InputHook``. """ self._current_gui = GUI_TK if app is None: try: import Tkinter as _TK except: # Python 3 import tkinter as _TK app = _TK.Tk() app.withdraw() self._apps[GUI_TK] = app from pydev_ipython.inputhooktk import create_inputhook_tk self.set_inputhook(create_inputhook_tk(app)) return app def disable_tk(self): """Disable event loop integration with Tkinter. This merely sets PyOS_InputHook to NULL. """ self.clear_inputhook() def enable_glut(self, app=None): """ Enable event loop integration with GLUT. Parameters ---------- app : ignored Ignored, it's only a placeholder to keep the call signature of all gui activation methods consistent, which simplifies the logic of supporting magics. Notes ----- This methods sets the PyOS_InputHook for GLUT, which allows the GLUT to integrate with terminal based applications like IPython. Due to GLUT limitations, it is currently not possible to start the event loop without first creating a window. You should thus not create another window but use instead the created one. See 'gui-glut.py' in the docs/examples/lib directory. The default screen mode is set to: glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH """ import OpenGL.GLUT as glut from pydev_ipython.inputhookglut import glut_display_mode, \ glut_close, glut_display, \ glut_idle, inputhook_glut if GUI_GLUT not in self._apps: glut.glutInit(sys.argv) glut.glutInitDisplayMode(glut_display_mode) # This is specific to freeglut if bool(glut.glutSetOption): glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE, glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS) glut.glutCreateWindow(sys.argv[0]) glut.glutReshapeWindow(1, 1) glut.glutHideWindow() glut.glutWMCloseFunc(glut_close) glut.glutDisplayFunc(glut_display) glut.glutIdleFunc(glut_idle) else: glut.glutWMCloseFunc(glut_close) glut.glutDisplayFunc(glut_display) glut.glutIdleFunc(glut_idle) self.set_inputhook(inputhook_glut) self._current_gui = GUI_GLUT self._apps[GUI_GLUT] = True def disable_glut(self): """Disable event loop integration with glut. This sets PyOS_InputHook to NULL and set the display function to a dummy one and set the timer to a dummy timer that will be triggered very far in the future. """ import OpenGL.GLUT as glut from glut_support import glutMainLoopEvent # @UnresolvedImport glut.glutHideWindow() # This is an event to be processed below glutMainLoopEvent() self.clear_inputhook() def enable_pyglet(self, app=None): """Enable event loop integration with pyglet. Parameters ---------- app : ignored Ignored, it's only a placeholder to keep the call signature of all gui activation methods consistent, which simplifies the logic of supporting magics. Notes ----- This methods sets the ``PyOS_InputHook`` for pyglet, which allows pyglet to integrate with terminal based applications like IPython. """ from pydev_ipython.inputhookpyglet import inputhook_pyglet self.set_inputhook(inputhook_pyglet) self._current_gui = GUI_PYGLET return app def disable_pyglet(self): """Disable event loop integration with pyglet. This merely sets PyOS_InputHook to NULL. """ self.clear_inputhook() def enable_gtk3(self, app=None): """Enable event loop integration with Gtk3 (gir bindings). Parameters ---------- app : ignored Ignored, it's only a placeholder to keep the call signature of all gui activation methods consistent, which simplifies the logic of supporting magics. 
Notes ----- This methods sets the PyOS_InputHook for Gtk3, which allows the Gtk3 to integrate with terminal based applications like IPython. """ from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3 self.set_inputhook(create_inputhook_gtk3(self._stdin_file)) self._current_gui = GUI_GTK def disable_gtk3(self): """Disable event loop integration with PyGTK. This merely sets PyOS_InputHook to NULL. """ self.clear_inputhook() def enable_mac(self, app=None): """ Enable event loop integration with MacOSX. We call function pyplot.pause, which updates and displays active figure during pause. It's not MacOSX-specific, but it enables to avoid inputhooks in native MacOSX backend. Also we shouldn't import pyplot, until user does it. Cause it's possible to choose backend before importing pyplot for the first time only. """ def inputhook_mac(app=None): if self.pyplot_imported: pyplot = sys.modules['matplotlib.pyplot'] try: pyplot.pause(0.01) except: pass else: if 'matplotlib.pyplot' in sys.modules: self.pyplot_imported = True self.set_inputhook(inputhook_mac) self._current_gui = GUI_OSX def disable_mac(self): self.clear_inputhook() def current_gui(self): """Return a string indicating the currently active GUI or None.""" return self._current_gui inputhook_manager = InputHookManager() enable_wx = inputhook_manager.enable_wx disable_wx = inputhook_manager.disable_wx enable_qt4 = inputhook_manager.enable_qt4 disable_qt4 = inputhook_manager.disable_qt4 enable_gtk = inputhook_manager.enable_gtk disable_gtk = inputhook_manager.disable_gtk enable_tk = inputhook_manager.enable_tk disable_tk = inputhook_manager.disable_tk enable_glut = inputhook_manager.enable_glut disable_glut = inputhook_manager.disable_glut enable_pyglet = inputhook_manager.enable_pyglet disable_pyglet = inputhook_manager.disable_pyglet enable_gtk3 = inputhook_manager.enable_gtk3 disable_gtk3 = inputhook_manager.disable_gtk3 enable_mac = inputhook_manager.enable_mac disable_mac = inputhook_manager.disable_mac clear_inputhook = inputhook_manager.clear_inputhook set_inputhook = inputhook_manager.set_inputhook current_gui = inputhook_manager.current_gui clear_app_refs = inputhook_manager.clear_app_refs # We maintain this as stdin_ready so that the individual inputhooks # can diverge as little as possible from their IPython sources stdin_ready = inputhook_manager.return_control set_return_control_callback = inputhook_manager.set_return_control_callback get_return_control_callback = inputhook_manager.get_return_control_callback get_inputhook = inputhook_manager.get_inputhook # Convenience function to switch amongst them def enable_gui(gui=None, app=None): """Switch amongst GUI input hooks by name. This is just a utility wrapper around the methods of the InputHookManager object. Parameters ---------- gui : optional, string or None If None (or 'none'), clears input hook, otherwise it must be one of the recognized GUI names (see ``GUI_*`` constants in module). app : optional, existing application object. For toolkits that have the concept of a global app, you can supply an existing one. If not given, the toolkit will be probed for one, and if none is found, a new one will be created. Note that GTK does not have this concept, and passing an app if ``gui=="GTK"`` will raise an error. Returns ------- The output of the underlying gui switch routine, typically the actual PyOS_InputHook wrapper object or the GUI toolkit app created, if there was one. 
""" if get_return_control_callback() is None: raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled") guis = {GUI_NONE: clear_inputhook, GUI_OSX: enable_mac, GUI_TK: enable_tk, GUI_GTK: enable_gtk, GUI_WX: enable_wx, GUI_QT: enable_qt4, # qt3 not supported GUI_QT4: enable_qt4, GUI_GLUT: enable_glut, GUI_PYGLET: enable_pyglet, GUI_GTK3: enable_gtk3, } try: gui_hook = guis[gui] except KeyError: if gui is None or gui == '': gui_hook = clear_inputhook else: e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys()) raise ValueError(e) return gui_hook(app) __all__ = [ "GUI_WX", "GUI_QT", "GUI_QT4", "GUI_GTK", "GUI_TK", "GUI_OSX", "GUI_GLUT", "GUI_PYGLET", "GUI_GTK3", "GUI_NONE", "ignore_CTRL_C", "allow_CTRL_C", "InputHookManager", "inputhook_manager", "enable_wx", "disable_wx", "enable_qt4", "disable_qt4", "enable_gtk", "disable_gtk", "enable_tk", "disable_tk", "enable_glut", "disable_glut", "enable_pyglet", "disable_pyglet", "enable_gtk3", "disable_gtk3", "enable_mac", "disable_mac", "clear_inputhook", "set_inputhook", "current_gui", "clear_app_refs", "stdin_ready", "set_return_control_callback", "get_return_control_callback", "get_inputhook", "enable_gui"]
epl-1.0
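A minimal sketch of how a console embedding that module wires it up; the callback is a stand-in, since a real host returns control only when user input is pending:

from pydev_ipython.inputhook import (set_return_control_callback,
                                     enable_gui, get_inputhook)

# The host must register this before any GUI can be enabled (enable_gui
# raises ValueError otherwise).  A real console checks its input channel.
set_return_control_callback(lambda: True)

# Pick a toolkit by name; for Tk this creates and withdraws a root window
# and registers a hook that pumps the Tk event loop while idle.
app = enable_gui('tk')

hook = get_inputhook()
if hook is not None:
    hook()  # the embedding console calls this while waiting for input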
larsmans/scikit-learn
doc/sphinxext/numpy_ext/docscrape_sphinx.py
408
8061
import re import inspect import textwrap import pydoc from .docscrape import NumpyDocString from .docscrape import FunctionDoc from .docscrape import ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config=None): config = {} if config is None else config self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' ' * indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: # GAEL: Toctree commented out below because it creates # hundreds of sphinx warnings # out += ['.. autosummary::', ' :toctree:', ''] out += ['.. autosummary::', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. 
index:: %s' % idx.get('default', '')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it import sphinx # local import to avoid test dependency if sphinx.__version__ >= "0.6": out += ['.. only:: latex', ''] else: out += ['.. latexonly::', ''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Methods',): out += self._str_member_list(param_list) out = self._str_indent(out, indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config=None): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
bsd-3-clause
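A short sketch of the entry point at the bottom of that file, which turns a numpydoc-style docstring into the reStructuredText Sphinx consumes (the import path assumes doc/sphinxext is on sys.path, as during the docs build; the wrapped function is arbitrary):

import numpy as np
from numpy_ext.docscrape_sphinx import get_doc_object

# Wrap any object whose docstring follows the numpydoc sections
# (Parameters, Returns, Examples, ...); str() renders Sphinx-ready reST
# with rubrics, field lists and autosummary tables.
doc = get_doc_object(np.clip, config={'use_plots': False})
print(str(doc))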
mraspaud/dask
dask/dataframe/utils.py
1
21094
from __future__ import absolute_import, division, print_function import re import textwrap from distutils.version import LooseVersion from collections import Iterator import sys import traceback from contextlib import contextmanager import numpy as np import pandas as pd import pandas.util.testing as tm from pandas.api.types import is_categorical_dtype, is_scalar try: from pandas.api.types import is_datetime64tz_dtype except ImportError: # pandas < 0.19.2 from pandas.core.common import is_datetime64tz_dtype from ..core import get_deps from ..local import get_sync PANDAS_VERSION = LooseVersion(pd.__version__) def shard_df_on_index(df, divisions): """ Shard a DataFrame by ranges on its index Examples -------- >>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]}) >>> df a b 0 0 5 1 10 4 2 20 3 3 30 2 4 40 1 >>> shards = list(shard_df_on_index(df, [2, 4])) >>> shards[0] a b 0 0 5 1 10 4 >>> shards[1] a b 2 20 3 3 30 2 >>> shards[2] a b 4 40 1 >>> list(shard_df_on_index(df, []))[0] # empty case a b 0 0 5 1 10 4 2 20 3 3 30 2 4 40 1 """ if isinstance(divisions, Iterator): divisions = list(divisions) if not len(divisions): yield df else: divisions = np.array(divisions) df = df.sort_index() index = df.index if is_categorical_dtype(index): index = index.as_ordered() indices = index.searchsorted(divisions) yield df.iloc[:indices[0]] for i in range(len(indices) - 1): yield df.iloc[indices[i]: indices[i + 1]] yield df.iloc[indices[-1]:] _META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional" _META_DESCRIPTION = """\ An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and column names of the output. This metadata is necessary for many algorithms in dask dataframe to work. For ease of use, some alternative inputs are also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}`` or iterable of ``(name, dtype)`` can be provided. Instead of a series, a tuple of ``(name, dtype)`` can be used. If not provided, dask will try to infer the metadata. This may lead to unexpected results, so providing ``meta`` is recommended. For more information, see ``dask.dataframe.utils.make_meta``. """ def insert_meta_param_description(*args, **kwargs): """Replace `$META` in docstring with param description. If pad keyword is provided, will pad description by that number of spaces (default is 8).""" if not args: return lambda f: insert_meta_param_description(f, **kwargs) f = args[0] indent = " " * kwargs.get('pad', 8) body = textwrap.wrap(_META_DESCRIPTION, initial_indent=indent, subsequent_indent=indent, width=78) descr = '{0}\n{1}'.format(_META_TYPES, '\n'.join(body)) if f.__doc__: if '$META' in f.__doc__: f.__doc__ = f.__doc__.replace('$META', descr) else: # Put it at the end of the parameters section parameter_header = 'Parameters\n%s----------' % indent[4:] first, last = re.split('Parameters\\n[ ]*----------', f.__doc__) parameters, rest = last.split('\n\n', 1) f.__doc__ = '{0}{1}{2}\n{3}{4}\n\n{5}'.format(first, parameter_header, parameters, indent[4:], descr, rest) return f @contextmanager def raise_on_meta_error(funcname=None): """Reraise errors in this block to show metadata inference failure. Parameters ---------- funcname : str, optional If provided, will be added to the error message to indicate the name of the method that failed. 
""" try: yield except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() tb = ''.join(traceback.format_tb(exc_traceback)) msg = ("Metadata inference failed{0}.\n\n" "Original error is below:\n" "------------------------\n" "{1}\n\n" "Traceback:\n" "---------\n" "{2}" ).format(" in `{0}`".format(funcname) if funcname else "", repr(e), tb) raise ValueError(msg) UNKNOWN_CATEGORIES = '__UNKNOWN_CATEGORIES__' def has_known_categories(x): """Returns whether the categories in `x` are known. Parameters ---------- x : Series or CategoricalIndex """ x = getattr(x, '_meta', x) if isinstance(x, pd.Series): return UNKNOWN_CATEGORIES not in x.cat.categories elif isinstance(x, pd.CategoricalIndex): return UNKNOWN_CATEGORIES not in x.categories raise TypeError("Expected Series or CategoricalIndex") def strip_unknown_categories(x): """Replace any unknown categoricals with empty categoricals. Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results. """ if isinstance(x, (pd.Series, pd.DataFrame)): x = x.copy() if isinstance(x, pd.DataFrame): cat_mask = x.dtypes == 'category' if cat_mask.any(): cats = cat_mask[cat_mask].index for c in cats: if not has_known_categories(x[c]): x[c].cat.set_categories([], inplace=True) elif isinstance(x, pd.Series): if is_categorical_dtype(x.dtype) and not has_known_categories(x): x.cat.set_categories([], inplace=True) if (isinstance(x.index, pd.CategoricalIndex) and not has_known_categories(x.index)): x.index = x.index.set_categories([]) elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x): x = x.set_categories([]) return x def clear_known_categories(x, cols=None, index=True): """Set categories to be unknown. Parameters ---------- x : DataFrame, Series, Index cols : iterable, optional If x is a DataFrame, set only categoricals in these columns to unknown. By default, all categorical columns are set to unknown categoricals index : bool, optional If True and x is a Series or DataFrame, set the clear known categories in the index as well. """ if isinstance(x, (pd.Series, pd.DataFrame)): x = x.copy() if isinstance(x, pd.DataFrame): mask = x.dtypes == 'category' if cols is None: cols = mask[mask].index elif not mask.loc[cols].all(): raise ValueError("Not all columns are categoricals") for c in cols: x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True) elif isinstance(x, pd.Series): if is_categorical_dtype(x.dtype): x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True) if index and isinstance(x.index, pd.CategoricalIndex): x.index = x.index.set_categories([UNKNOWN_CATEGORIES]) elif isinstance(x, pd.CategoricalIndex): x = x.set_categories([UNKNOWN_CATEGORIES]) return x def _empty_series(name, dtype, index=None): if isinstance(dtype, str) and dtype == 'category': return pd.Series(pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index).iloc[:0] return pd.Series([], dtype=dtype, name=name, index=index) def make_meta(x, index=None): """Create an empty pandas object containing the desired metadata. Parameters ---------- x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or an iterable of `(name, dtype)` tuples. To create a `Series`, provide a tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index should match the desired output. If a dtype or scalar, a scalar of the same dtype is returned. index : pd.Index, optional Any pandas index to use in the metadata. If none provided, a `RangeIndex` will be used. 
Examples -------- >>> make_meta([('a', 'i8'), ('b', 'O')]) Empty DataFrame Columns: [a, b] Index: [] >>> make_meta(('a', 'f8')) Series([], Name: a, dtype: float64) >>> make_meta('i8') 1 """ if hasattr(x, '_meta'): return x._meta if isinstance(x, (pd.Series, pd.DataFrame)): return x.iloc[0:0] elif isinstance(x, pd.Index): return x[0:0] index = index if index is None else index[0:0] if isinstance(x, dict): return pd.DataFrame({c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index) if isinstance(x, tuple) and len(x) == 2: return _empty_series(x[0], x[1], index=index) elif isinstance(x, (list, tuple)): if not all(isinstance(i, tuple) and len(i) == 2 for i in x): raise ValueError("Expected iterable of tuples of (name, dtype), " "got {0}".format(x)) return pd.DataFrame({c: _empty_series(c, d, index=index) for (c, d) in x}, columns=[c for c, d in x], index=index) elif not hasattr(x, 'dtype') and x is not None: # could be a string, a dtype object, or a python type. Skip `None`, # because it is implictly converted to `dtype('f8')`, which we don't # want here. try: dtype = np.dtype(x) return _scalar_from_dtype(dtype) except: # Continue on to next check pass if is_scalar(x): return _nonempty_scalar(x) raise TypeError("Don't know how to create metadata from {0}".format(x)) def _nonempty_index(idx): typ = type(idx) if typ is pd.RangeIndex: return pd.RangeIndex(2, name=idx.name) elif typ in (pd.Int64Index, pd.Float64Index): return typ([1, 2], name=idx.name) elif typ is pd.Index: return pd.Index(['a', 'b'], name=idx.name) elif typ is pd.DatetimeIndex: start = '1970-01-01' data = [start, start] if idx.freq is None else None return pd.DatetimeIndex(data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name) elif typ is pd.PeriodIndex: return pd.PeriodIndex(start='1970-01-01', periods=2, freq=idx.freq, name=idx.name) elif typ is pd.TimedeltaIndex: start = np.timedelta64(1, 'D') data = [start, start] if idx.freq is None else None return pd.TimedeltaIndex(data, start=start, periods=2, freq=idx.freq, name=idx.name) elif typ is pd.CategoricalIndex: if len(idx.categories): data = [idx.categories[0]] * 2 cats = idx.categories else: data = _nonempty_index(idx.categories) cats = None return pd.CategoricalIndex(data, categories=cats, ordered=idx.ordered, name=idx.name) elif typ is pd.MultiIndex: levels = [_nonempty_index(i) for i in idx.levels] labels = [[0, 0] for i in idx.levels] return pd.MultiIndex(levels=levels, labels=labels, names=idx.names) raise TypeError("Don't know how to handle index of " "type {0}".format(type(idx).__name__)) _simple_fake_mapping = { 'b': np.bool_(True), 'V': np.void(b' '), 'M': np.datetime64('1970-01-01'), 'm': np.timedelta64(1), 'S': np.str_('foo'), 'a': np.str_('foo'), 'U': np.unicode_('foo'), 'O': 'foo' } def _scalar_from_dtype(dtype): if dtype.kind in ('i', 'f', 'u'): return dtype.type(1) elif dtype.kind == 'c': return dtype.type(complex(1, 0)) elif dtype.kind in _simple_fake_mapping: o = _simple_fake_mapping[dtype.kind] return o.astype(dtype) if dtype.kind in ('m', 'M') else o else: raise TypeError("Can't handle dtype: {0}".format(dtype)) def _nonempty_scalar(x): if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)): return x elif np.isscalar(x): dtype = x.dtype if hasattr(x, 'dtype') else np.dtype(type(x)) return _scalar_from_dtype(dtype) else: raise TypeError("Can't handle meta of type " "'{0}'".format(type(x).__name__)) def _nonempty_series(s, idx): dtype = s.dtype if is_datetime64tz_dtype(dtype): entry = pd.Timestamp('1970-01-01', 
tz=dtype.tz) data = [entry, entry] elif is_categorical_dtype(dtype): if len(s.cat.categories): data = [s.cat.categories[0]] * 2 cats = s.cat.categories else: data = _nonempty_index(s.cat.categories) cats = None data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered) else: entry = _scalar_from_dtype(dtype) data = np.array([entry, entry], dtype=dtype) return pd.Series(data, name=s.name, index=idx) def meta_nonempty(x): """Create a nonempty pandas object from the given metadata. Returns a pandas DataFrame, Series, or Index that contains two rows of fake data. """ if isinstance(x, pd.Index): return _nonempty_index(x) elif isinstance(x, pd.Series): idx = _nonempty_index(x.index) return _nonempty_series(x, idx) elif isinstance(x, pd.DataFrame): idx = _nonempty_index(x.index) data = {i: _nonempty_series(x.iloc[:, i], idx) for i, c in enumerate(x.columns)} res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns))) res.columns = x.columns return res elif is_scalar(x): return _nonempty_scalar(x) else: raise TypeError("Expected Index, Series, DataFrame, or scalar, " "got {0}".format(type(x).__name__)) ############################################################### # Testing ############################################################### def _check_dask(dsk, check_names=True, check_dtypes=True, result=None): import dask.dataframe as dd if hasattr(dsk, 'dask'): if result is None: result = dsk.compute(get=get_sync) if isinstance(dsk, dd.Index): assert isinstance(result, pd.Index), type(result) assert isinstance(dsk._meta, pd.Index), type(dsk._meta) if check_names: assert dsk.name == result.name assert dsk._meta.name == result.name if isinstance(result, pd.MultiIndex): assert result.names == dsk._meta.names if check_dtypes: assert_dask_dtypes(dsk, result) elif isinstance(dsk, dd.Series): assert isinstance(result, pd.Series), type(result) assert isinstance(dsk._meta, pd.Series), type(dsk._meta) if check_names: assert dsk.name == result.name, (dsk.name, result.name) assert dsk._meta.name == result.name if check_dtypes: assert_dask_dtypes(dsk, result) _check_dask(dsk.index, check_names=check_names, check_dtypes=check_dtypes, result=result.index) elif isinstance(dsk, dd.DataFrame): assert isinstance(result, pd.DataFrame), type(result) assert isinstance(dsk.columns, pd.Index), type(dsk.columns) assert isinstance(dsk._meta, pd.DataFrame), type(dsk._meta) if check_names: tm.assert_index_equal(dsk.columns, result.columns) tm.assert_index_equal(dsk._meta.columns, result.columns) if check_dtypes: assert_dask_dtypes(dsk, result) _check_dask(dsk.index, check_names=check_names, check_dtypes=check_dtypes, result=result.index) elif isinstance(dsk, dd.core.Scalar): assert (np.isscalar(result) or isinstance(result, (pd.Timestamp, pd.Timedelta))) if check_dtypes: assert_dask_dtypes(dsk, result) else: msg = 'Unsupported dask instance {0} found'.format(type(dsk)) raise AssertionError(msg) return result return dsk def _maybe_sort(a): # sort by value, then index try: if isinstance(a, pd.DataFrame): a = a.sort_values(by=a.columns.tolist()) else: a = a.sort_values() except (TypeError, IndexError, ValueError): pass return a.sort_index() def assert_eq(a, b, check_names=True, check_dtypes=True, check_divisions=True, check_index=True, **kwargs): if check_divisions: assert_divisions(a) assert_divisions(b) if hasattr(a, 'divisions') and hasattr(b, 'divisions'): at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion assert at == bt, 
(at, bt) assert_sane_keynames(a) assert_sane_keynames(b) a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes) b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes) if not check_index: a = a.reset_index(drop=True) b = b.reset_index(drop=True) if isinstance(a, pd.DataFrame): a = _maybe_sort(a) b = _maybe_sort(b) tm.assert_frame_equal(a, b, **kwargs) elif isinstance(a, pd.Series): a = _maybe_sort(a) b = _maybe_sort(b) tm.assert_series_equal(a, b, check_names=check_names, **kwargs) elif isinstance(a, pd.Index): tm.assert_index_equal(a, b, **kwargs) else: if a == b: return True else: if np.isnan(a): assert np.isnan(b) else: assert np.allclose(a, b) return True def assert_dask_graph(dask, label): if hasattr(dask, 'dask'): dask = dask.dask assert isinstance(dask, dict) for k in dask: if isinstance(k, tuple): k = k[0] if k.startswith(label): return True else: msg = "given dask graph doesn't contan label: {0}" raise AssertionError(msg.format(label)) def assert_divisions(ddf): if not hasattr(ddf, 'divisions'): return if not hasattr(ddf, 'index'): return if not ddf.known_divisions: return index = lambda x: x if isinstance(x, pd.Index) else x.index results = get_sync(ddf.dask, ddf._keys()) for i, df in enumerate(results[:-1]): if len(df): assert index(df).min() >= ddf.divisions[i] assert index(df).max() < ddf.divisions[i + 1] if len(results[-1]): assert index(results[-1]).min() >= ddf.divisions[-2] assert index(results[-1]).max() <= ddf.divisions[-1] def assert_sane_keynames(ddf): if not hasattr(ddf, 'dask'): return for k in ddf.dask.keys(): while isinstance(k, tuple): k = k[0] assert isinstance(k, (str, bytes)) assert len(k) < 100 assert ' ' not in k if sys.version_info[0] >= 3: assert k.split('-')[0].isidentifier() def assert_dask_dtypes(ddf, res, numeric_equal=True): """Check that the dask metadata matches the result. If `numeric_equal`, integer and floating dtypes compare equal. This is useful due to the implicit conversion of integer to floating upon encountering missingness, which is hard to infer statically.""" eq_types = {'O', 'S', 'U', 'a'} # treat object and strings alike if numeric_equal: eq_types.update(('i', 'f')) if isinstance(res, pd.DataFrame): for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples(): assert (a.kind in eq_types and b.kind in eq_types) or (a == b) elif isinstance(res, (pd.Series, pd.Index)): a = ddf._meta.dtype b = res.dtype assert (a.kind in eq_types and b.kind in eq_types) or (a == b) else: if hasattr(ddf._meta, 'dtype'): a = ddf._meta.dtype if not hasattr(res, 'dtype'): assert np.isscalar(res) b = np.dtype(type(res)) else: b = res.dtype assert (a.kind in eq_types and b.kind in eq_types) or (a == b) else: assert type(ddf._meta) == type(res) def assert_max_deps(x, n, eq=True): dependencies, dependents = get_deps(x.dask) if eq: assert max(map(len, dependencies.values())) == n else: assert max(map(len, dependencies.values())) <= n
bsd-3-clause
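As a rough illustration of how the metadata helpers in the file above fit together (a minimal sketch, not part of the file; it assumes the functions are importable, e.g. from dask.dataframe.utils, and that pandas is installed):

import pandas as pd

# Empty metadata for a two-column frame, then a two-row stand-in with the
# same dtypes; dask.dataframe uses this pairing when inferring metadata.
meta = make_meta([('a', 'i8'), ('b', 'O')])   # empty DataFrame, dtypes int64/object
fake = meta_nonempty(meta)                    # two rows of fake data, same dtypes

# Categorical columns can be marked "unknown" and queried afterwards.
df = pd.Series(['x', 'y']).astype('category').to_frame('c')
unknown = clear_known_categories(df)          # categories become UNKNOWN_CATEGORIES
print(has_known_categories(unknown['c']))     # False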
biocore/qiime
scripts/categorized_dist_scatterplot.py
15
6299
#!/usr/bin/env python # File created on 19 Jan 2011 from __future__ import division __author__ = "Justin Kuczynski" __copyright__ = "Copyright 2011, The QIIME Project" __credits__ = ["Justin Kuczynski"] __license__ = "GPL" __version__ = "1.9.1-dev" __maintainer__ = "Justin Kuczynski" __email__ = "justinak@gmail.com" from qiime.util import make_option import os import warnings warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found') from qiime.parse import parse_distmat_to_dict, parse_mapping_file,\ mapping_file_to_dict from qiime.util import parse_command_line_parameters import numpy import matplotlib.pyplot as plt from qiime.categorized_dist_scatterplot import get_avg_dists, get_sam_ids script_info = {} script_info[ 'brief_description'] = "Create a categorized distance scatterplot representing average distances between samples, broken down by categories" script_info[ 'script_description'] = "Create a figure representing average distances between samples, broken down by categories. I call it a 'categorized distance scatterplot'. See script usage for more details. The mapping file specifies the relevant data - if you have e.g. 'N/A' values or samples you don't want included, first use filter_samples_from_otu_table.py to remove unwanted samples from the mapping file, and thus the analysis. Note that the resulting plot will include only samples in both the mapping file AND the distance matrix." script_info['script_usage'] = [( "Canonical Example:", "Split samples by country. Within each country compare each child to all adults. Plot the average distance from that child to all adults, vs. the age of that child", "python categorized_dist_scatterplot.py -m map.txt -d unifrac_distance.txt -c Country -p AgeCategory:Child -s AgeCategory:Adult -a AgeYears -o fig1.png"), ("Example 2:", "Same as above, but compares Child with all other categories (e.g.: NA, Infant, etc.)", "python categorized_dist_scatterplot.py -m map.txt -d unifrac_distance.txt -c Country -p AgeCategory:Child -a AgeYears -o fig1.svg")] script_info[ 'output_description'] = "a figure and the text dat for that figure " script_info['required_options'] = [ make_option('-m', '--map', type='existing_filepath', help='mapping file'), make_option('-d', '--distance_matrix', type='existing_filepath', help='distance matrix'), make_option('-p', '--primary_state', type='string', help="Samples matching this state will be plotted. E.g.: AgeCategory:Child . See qiime's filter_samples_from_otu_table.py for more syntax options"), make_option('-a', '--axis_category', type='string', help='this will form the horizontal axis of the figure, e.g.: AgeYears . Must be numbers'), make_option('-o', '--output_path', type='new_dirpath', help='output figure, filename extention determines format. E.g.: "fig1.png" or similar. A "fig1.txt" or similar will also be created with the data underlying the figure'), ] script_info['optional_options'] = [ make_option('-c', '--colorby', type='string', help='samples will first be separated by this column of the mapping file. They will be colored by this column of the mapping file, and all comparisons will be done only among samples with the same value in this column. e.g.: Country. You may omit -c, and the samples will not be separated'), make_option('-s', '--secondary_state', type='string', help='all samples matching the primary state will be compared to samples matcthing this secondary state. 
E.g.: AgeCategory:Adult'), ] script_info['version'] = __version__ def main(): option_parser, opts, args =\ parse_command_line_parameters(**script_info) map_data, map_header, map_comments = parse_mapping_file( open(opts.map, 'U')) map_dict = mapping_file_to_dict(map_data, map_header) distdict = parse_distmat_to_dict(open(opts.distance_matrix, 'U')) if opts.colorby is None: colorby_cats = [None] else: colorby_idx = map_header.index(opts.colorby) colorby_cats = list(set([map_data[i][colorby_idx] for i in range(len(map_data))])) textfilename = os.path.splitext(opts.output_path)[0] + '.txt' text_fh = open(textfilename, 'w') text_fh.write(opts.axis_category + '\tdistance\tSampleID' + '\n') colorby_cats.sort() plt.figure() for cat_num, cat in enumerate(colorby_cats): # collect the primary and secondary samples within this category state1_samids, state2_samids = get_sam_ids(map_data, map_header, opts.colorby, cat, opts.primary_state, opts.secondary_state) state1_samids =\ list(set(state1_samids).intersection(set(distdict.keys()))) state2_samids =\ list(set(state2_samids).intersection(set(distdict.keys()))) if state1_samids == [] or state2_samids == [] or \ (len(state1_samids) == 1 and state1_samids == state2_samids): raise RuntimeError("one category of samples didn't have any valid" + " distances. try eliminating samples from -p or -s, or changing" + " your mapping file with filter_samples_from_otu_table.py") # go through dmtx state1_avg_dists = get_avg_dists( state1_samids, state2_samids, distdict) # plot xvals = [float(map_dict[sam][opts.axis_category]) for sam in state1_samids] try: color = plt.cm.jet(cat_num / (len(colorby_cats) - 1)) except ZeroDivisionError: # only one cat color = 'b' plt.scatter(xvals, state1_avg_dists, edgecolors=color, alpha=.5, facecolors='none') plt.xlabel(opts.axis_category) plt.ylabel('average distance') lines = [str(xvals[i]) + '\t' + str(state1_avg_dists[i]) + '\t' + state1_samids[i] + '\n' for i in range(len(xvals))] text_fh.writelines(lines) if opts.colorby is not None: plt.legend(colorby_cats) plt.savefig(opts.output_path) if __name__ == "__main__": main()
gpl-2.0
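For orientation, the quantity plotted by the script above is the average distance from each primary-state sample to all secondary-state samples. A minimal sketch of that idea (my own illustration, not the imported get_avg_dists implementation) might look like:

def avg_dists_sketch(state1_samids, state2_samids, distdict):
    # For each primary sample, average its distance to every secondary sample,
    # skipping self-comparisons.
    averages = []
    for s1 in state1_samids:
        others = [s2 for s2 in state2_samids if s2 != s1]
        averages.append(sum(distdict[s1][s2] for s2 in others) / len(others))
    return averages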
AIML/scikit-learn
sklearn/neighbors/tests/test_nearest_centroid.py
305
4121
""" Testing for the nearest centroid module. """ import numpy as np from scipy import sparse as sp from numpy.testing import assert_array_equal from numpy.testing import assert_equal from sklearn.neighbors import NearestCentroid from sklearn import datasets from sklearn.metrics.pairwise import pairwise_distances # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] X_csr = sp.csr_matrix(X) # Sparse matrix y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] T_csr = sp.csr_matrix(T) true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_classification_toy(): # Check classification on a toy dataset, including sparse versions. clf = NearestCentroid() clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) # Same test, but with a sparse matrix to fit and test. clf = NearestCentroid() clf.fit(X_csr, y) assert_array_equal(clf.predict(T_csr), true_result) # Fit with sparse, test with non-sparse clf = NearestCentroid() clf.fit(X_csr, y) assert_array_equal(clf.predict(T), true_result) # Fit with non-sparse, test with sparse clf = NearestCentroid() clf.fit(X, y) assert_array_equal(clf.predict(T_csr), true_result) # Fit and predict with non-CSR sparse matrices clf = NearestCentroid() clf.fit(X_csr.tocoo(), y) assert_array_equal(clf.predict(T_csr.tolil()), true_result) def test_precomputed(): clf = NearestCentroid(metric="precomputed") clf.fit(X, y) S = pairwise_distances(T, clf.centroids_) assert_array_equal(clf.predict(S), true_result) def test_iris(): # Check consistency on dataset iris. for metric in ('euclidean', 'cosine'): clf = NearestCentroid(metric=metric).fit(iris.data, iris.target) score = np.mean(clf.predict(iris.data) == iris.target) assert score > 0.9, "Failed with score = " + str(score) def test_iris_shrinkage(): # Check consistency on dataset iris, when using shrinkage. for metric in ('euclidean', 'cosine'): for shrink_threshold in [None, 0.1, 0.5]: clf = NearestCentroid(metric=metric, shrink_threshold=shrink_threshold) clf = clf.fit(iris.data, iris.target) score = np.mean(clf.predict(iris.data) == iris.target) assert score > 0.8, "Failed with score = " + str(score) def test_pickle(): import pickle # classification obj = NearestCentroid() obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_array_equal(score, score2, "Failed to generate same score" " after pickling (classification).") def test_shrinkage_threshold_decoded_y(): clf = NearestCentroid(shrink_threshold=0.01) y_ind = np.asarray(y) y_ind[y_ind == -1] = 0 clf.fit(X, y_ind) centroid_encoded = clf.centroids_ clf.fit(X, y) assert_array_equal(centroid_encoded, clf.centroids_) def test_predict_translated_data(): # Test that NearestCentroid gives same results on translated data rng = np.random.RandomState(0) X = rng.rand(50, 50) y = rng.randint(0, 3, 50) noise = rng.rand(50) clf = NearestCentroid(shrink_threshold=0.1) clf.fit(X, y) y_init = clf.predict(X) clf = NearestCentroid(shrink_threshold=0.1) X_noise = X + noise clf.fit(X_noise, y) y_translate = clf.predict(X_noise) assert_array_equal(y_init, y_translate) def test_manhattan_metric(): # Test the manhattan metric. 
    clf = NearestCentroid(metric='manhattan')
    clf.fit(X, y)
    dense_centroid = clf.centroids_
    clf.fit(X_csr, y)
    assert_array_equal(clf.centroids_, dense_centroid)
    assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
bsd-3-clause
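A minimal usage sketch of the estimator exercised by the tests above (assumes scikit-learn is installed; the values mirror the toy data in the test file):

import numpy as np
from sklearn.neighbors import NearestCentroid

X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([-1, -1, -1, 1, 1, 1])

clf = NearestCentroid()                 # Euclidean centroids by default
clf.fit(X, y)
print(clf.centroids_)                   # one centroid per class
print(clf.predict([[-1, -1], [2, 2]]))  # -> [-1  1]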
wzbozon/statsmodels
statsmodels/regression/tests/test_robustcov.py
25
31412
# -*- coding: utf-8 -*- """Testing OLS robust covariance matrices against STATA Created on Mon Oct 28 15:25:14 2013 Author: Josef Perktold """ import numpy as np from scipy import stats from numpy.testing import (assert_allclose, assert_equal, assert_warns, assert_raises) from statsmodels.regression.linear_model import OLS, WLS import statsmodels.stats.sandwich_covariance as sw from statsmodels.tools.tools import add_constant from statsmodels.datasets import macrodata from statsmodels.tools.sm_exceptions import InvalidTestWarning from .results import results_macro_ols_robust as res from .results import results_grunfeld_ols_robust_cluster as res2 #test_hac_simple(): class CheckOLSRobust(object): def test_basic(self): res1 = self.res1 res2 = self.res2 rtol = getattr(self, 'rtol', 1e-10) assert_allclose(res1.params, res2.params, rtol=rtol) assert_allclose(self.bse_robust, res2.bse, rtol=rtol) assert_allclose(self.cov_robust, res2.cov, rtol=rtol) def test_tests(self): # Note: differences between small (t-distribution, ddof) and large (normal) # F statistic has no ddof correction in large, but uses F distribution (?) res1 = self.res1 res2 = self.res2 rtol = getattr(self, 'rtol', 1e-10) rtolh = getattr(self, 'rtolh', 1e-12) mat = np.eye(len(res1.params)) tt = res1.t_test(mat, cov_p=self.cov_robust) # has 'effect', 'pvalue', 'sd', 'tvalue' # TODO confint missing assert_allclose(tt.effect, res2.params, rtol=rtol) assert_allclose(tt.sd, res2.bse, rtol=rtol) assert_allclose(tt.tvalue, res2.tvalues, rtol=rtol) if self.small: assert_allclose(tt.pvalue, res2.pvalues, rtol=5 * rtol) else: pval = stats.norm.sf(np.abs(tt.tvalue)) * 2 assert_allclose(pval, res2.pvalues, rtol=5 * rtol, atol=1e-25) ft = res1.f_test(mat[:-1], cov_p=self.cov_robust) if self.small: #'df_denom', 'df_num', 'fvalue', 'pvalue' assert_allclose(ft.fvalue, res2.F, rtol=rtol) # f-pvalue is not directly available in Stata results, but is in ivreg2 if hasattr(res2, 'Fp'): assert_allclose(ft.pvalue, res2.Fp, rtol=rtol) else: if not getattr(self, 'skip_f', False): dof_corr = res1.df_resid * 1. 
/ res1.nobs assert_allclose(ft.fvalue * dof_corr, res2.F, rtol=rtol) if hasattr(res2, 'df_r'): assert_equal(ft.df_num, res2.df_m) assert_equal(ft.df_denom, res2.df_r) else: # ivreg2 assert_equal(ft.df_num, res2.Fdf1) assert_equal(ft.df_denom, res2.Fdf2) # SMOKE tt.summary() ft.summary() tt.summary_frame() class TestOLSRobust1(CheckOLSRobust): # compare with regress robust def setup(self): res_ols = self.res1 self.bse_robust = res_ols.HC1_se self.cov_robust = res_ols.cov_HC1 self.small = True self.res2 = res.results_hc0 @classmethod def setup_class(cls): d2 = macrodata.load().data g_gdp = 400*np.diff(np.log(d2['realgdp'])) g_inv = 400*np.diff(np.log(d2['realinv'])) exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1]], prepend=False) cls.res1 = res_ols = OLS(g_inv, exogg).fit() class TestOLSRobust2(TestOLSRobust1): # compare with ivreg robust small def setup(self): res_ols = self.res1 self.bse_robust = res_ols.HC1_se self.cov_robust = res_ols.cov_HC1 self.small = True self.res2 = res.results_ivhc0_small class TestOLSRobust3(TestOLSRobust1): # compare with ivreg robust (not small) def setup(self): res_ols = self.res1 self.bse_robust = res_ols.HC0_se self.cov_robust = res_ols.cov_HC0 self.small = False self.res2 = res.results_ivhc0_large class TestOLSRobustHacSmall(TestOLSRobust1): # compare with ivreg robust small def setup(self): res_ols = self.res1 cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=True) se1 = sw.se_cov(cov1) self.bse_robust = se1 self.cov_robust = cov1 self.small = True self.res2 = res.results_ivhac4_small class TestOLSRobustHacLarge(TestOLSRobust1): # compare with ivreg robust (not small) def setup(self): res_ols = self.res1 cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=False) se1 = sw.se_cov(cov1) self.bse_robust = se1 self.cov_robust = cov1 self.small = False self.res2 = res.results_ivhac4_large class CheckOLSRobustNewMixin(object): # This uses the robust covariance as default covariance def test_compare(self): rtol = getattr(self, 'rtol', 1e-10) assert_allclose(self.cov_robust, self.cov_robust2, rtol=rtol) assert_allclose(self.bse_robust, self.bse_robust2, rtol=rtol) def test_fvalue(self): if not getattr(self, 'skip_f', False): rtol = getattr(self, 'rtol', 1e-10) assert_allclose(self.res1.fvalue, self.res2.F, rtol=rtol) if hasattr(self.res2, 'Fp'): #only available with ivreg2 assert_allclose(self.res1.f_pvalue, self.res2.Fp, rtol=rtol) def test_confint(self): rtol = getattr(self, 'rtol', 1e-10) ci1 = self.res1.conf_int() ci2 = self.res2.params_table[:,4:6] assert_allclose(ci1, ci2, rtol=rtol) # check critical value crit1 = np.diff(ci1, 1).ravel() / 2 / self.res1.bse crit2 = np.diff(ci1, 1).ravel() / 2 / self.res1.bse assert_allclose(crit1, crit2, rtol=12) def test_ttest(self): res1 = self.res1 res2 = self.res2 rtol = getattr(self, 'rtol', 1e-10) rtolh = getattr(self, 'rtol', 1e-12) mat = np.eye(len(res1.params)) tt = res1.t_test(mat, cov_p=self.cov_robust) # has 'effect', 'pvalue', 'sd', 'tvalue' # TODO confint missing assert_allclose(tt.effect, res2.params, rtol=rtolh) assert_allclose(tt.sd, res2.bse, rtol=rtol) assert_allclose(tt.tvalue, res2.tvalues, rtol=rtolh) assert_allclose(tt.pvalue, res2.pvalues, rtol=5 * rtol) ci1 = tt.conf_int() ci2 = self.res2.params_table[:,4:6] assert_allclose(ci1, ci2, rtol=rtol) def test_scale(self): res1 = self.res1 res2 = self.res2 rtol = 1e-5 # Note we always use df_resid for scale # Stata uses nobs or df_resid for rmse, not always available in Stata #assert_allclose(res1.scale, res2.rmse**2 * res2.N / (res2.N - 
res2.df_m - 1), rtol=rtol) skip = False if hasattr(res2, 'rss'): scale = res2.rss / (res2.N - res2.df_m - 1) elif hasattr(res2, 'rmse'): scale = res2.rmse**2 else: skip = True if isinstance(res1.model, WLS): skip = True # Stata uses different scaling and using unweighted resid for rmse if not skip: assert_allclose(res1.scale, scale, rtol=rtol) if not res2.vcetype == 'Newey-West': # no rsquared in Stata r2 = res2.r2 if hasattr(res2, 'r2') else res2.r2c assert_allclose(res1.rsquared, r2, rtol=rtol, err_msg=str(skip)) # consistency checks, not against Stata df_resid = res1.nobs - res1.df_model - 1 assert_equal(res1.df_resid, df_resid) # variance of resid_pearson is 1, with ddof, and loc=0 psum = (res1.resid_pearson**2).sum() assert_allclose(psum, df_resid, rtol=1e-13) def test_smoke(self): self.res1.summary() class TestOLSRobust2SmallNew(TestOLSRobust1, CheckOLSRobustNewMixin): # compare with ivreg robust small def setup(self): res_ols = self.res1.get_robustcov_results('HC1', use_t=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() self.bse_robust2 = res_ols.HC1_se self.cov_robust2 = res_ols.cov_HC1 self.small = True self.res2 = res.results_ivhc0_small def test_compare(self): #check that we get a warning using the nested compare methods res1 = self.res1 endog = res1.model.endog exog = res1.model.exog[:, [0, 2]] # drop one variable res_ols2 = OLS(endog, exog).fit() # results from Stata r_pval = .0307306938402991 r_chi2 = 4.667944083588736 r_df = 1 assert_warns(InvalidTestWarning, res1.compare_lr_test, res_ols2) import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") chi2, pval, df = res1.compare_lr_test(res_ols2) assert_allclose(chi2, r_chi2, rtol=1e-11) assert_allclose(pval, r_pval, rtol=1e-11) assert_equal(df, r_df) assert_warns(InvalidTestWarning, res1.compare_f_test, res_ols2) #fva, pval, df = res1.compare_f_test(res_ols2) class TestOLSRobustHACSmallNew(TestOLSRobust1, CheckOLSRobustNewMixin): # compare with ivreg robust small def setup(self): res_ols = self.res1.get_robustcov_results('HAC', maxlags=4, use_correction=True, use_t=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=True) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res.results_ivhac4_small class TestOLSRobust2LargeNew(TestOLSRobust1, CheckOLSRobustNewMixin): # compare with ivreg robust small def setup(self): res_ols = self.res1.get_robustcov_results('HC0') res_ols.use_t = False self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() self.bse_robust2 = res_ols.HC0_se self.cov_robust2 = res_ols.cov_HC0 self.small = False self.res2 = res.results_ivhc0_large # TODO: skipping next two for now, not refactored yet for `large` def test_fvalue(self): pass def test_confint(self): pass ####################################################### # cluster robust standard errors ####################################################### class CheckOLSRobustCluster(CheckOLSRobust): # compare with regress robust @classmethod def setup_class(cls): #import pandas as pa from statsmodels.datasets import grunfeld dtapa = grunfeld.data.load_pandas() #Stata example/data seems to miss last firm dtapa_endog = dtapa.endog[:200] dtapa_exog = dtapa.exog[:200] exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False) #asserts don't 
work for pandas cls.res1 = OLS(dtapa_endog, exog).fit() firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'), return_inverse=True) cls.groups = firm_id #time indicator in range(max Ti) time = np.asarray(dtapa_exog[['year']]) time -= time.min() cls.time = np.squeeze(time).astype(int) # nw_panel function requires interval bounds cls.tidx = [(i*20, 20*(i+1)) for i in range(10)] class TestOLSRobustCluster2(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('cluster', groups=self.groups, use_correction=True, use_t=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_cluster self.rtol = 1e-6 self.rtolh = 1e-10 class TestOLSRobustCluster2Input(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): import pandas as pd fat_array = self.groups.reshape(-1, 1) fat_groups = pd.DataFrame(fat_array) res_ols = self.res1.get_robustcov_results('cluster', groups=fat_groups, use_correction=True, use_t=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_cluster self.rtol = 1e-6 self.rtolh = 1e-10 def test_too_many_groups(self): long_groups = self.groups.reshape(-1, 1) groups3 = np.hstack((long_groups, long_groups, long_groups)) assert_raises(ValueError, self.res1.get_robustcov_results,'cluster', groups=groups3, use_correction=True, use_t=True) def test_2way_dataframe(self): import pandas as pd long_groups = self.groups.reshape(-1, 1) groups2 = pd.DataFrame(np.hstack((long_groups, long_groups))) res = self.res1.get_robustcov_results( 'cluster', groups=groups2, use_correction=True, use_t=True) class TestOLSRobustCluster2Fit(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # copy, past uses fit method # compare with `reg cluster` def setup(self): res_ols = self.res1.model.fit(cov_type='cluster', cov_kwds=dict( groups=self.groups, use_correction=True, use_t=True)) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_cluster self.rtol = 1e-6 self.rtolh = 1e-10 def test_basic_inference(self): res1 = self.res1 res2 = self.res2 rtol = 1e-7 assert_allclose(res1.params, res2.params, rtol=1e-8) assert_allclose(res1.bse, res2.bse, rtol=rtol) assert_allclose(res1.pvalues, res2.pvalues, rtol=rtol, atol=1e-20) ci = res2.params_table[:, 4:6] assert_allclose(res1.conf_int(), ci, rtol=5e-7, atol=1e-20) class TestOLSRobustCluster2Large(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('cluster', groups=self.groups, use_correction=False, use_t=False, df_correction=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 
self.cov_robust2 = cov1 self.small = False self.res2 = res2.results_cluster_large self.skip_f = True self.rtol = 1e-6 self.rtolh = 1e-10 # skipping see https://github.com/statsmodels/statsmodels/pull/1189#issuecomment-29141741 def test_f_value(self): pass class TestOLSRobustCluster2LargeFit(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): model = OLS(self.res1.model.endog, self.res1.model.exog) #res_ols = self.res1.model.fit(cov_type='cluster', res_ols = model.fit(cov_type='cluster', cov_kwds=dict(groups=self.groups, use_correction=False, use_t=False, df_correction=True)) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = False self.res2 = res2.results_cluster_large self.skip_f = True self.rtol = 1e-6 self.rtolh = 1e-10 # skipping see https://github.com/statsmodels/statsmodels/pull/1189#issuecomment-29141741 def t_est_fvalue(self): pass class TestOLSRobustClusterGS(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('nw-groupsum', time=self.time, maxlags=4, use_correction=False, use_t=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_nw_groupsum(self.res1, 4, self.time, use_correction=False) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_nw_groupsum4 self.skip_f = True self.rtol = 1e-6 self.rtolh = 1e-10 class TestOLSRobustClusterGSFit(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.model.fit(cov_type='nw-groupsum', cov_kwds=dict(time=self.time, maxlags=4, use_correction=False, use_t=True)) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_nw_groupsum(self.res1, 4, self.time, use_correction=False) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_nw_groupsum4 self.skip_f = True self.rtol = 1e-6 self.rtolh = 1e-10 class TestOLSRobustClusterNWP(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('nw-panel', time=self.time, maxlags=4, use_correction='hac', use_t=True, df_correction=False) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_nw_panel(self.res1, 4, self.tidx) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_nw_panel4 self.skip_f = True self.rtol = 1e-6 self.rtolh = 1e-10 # TODO: low precision/agreement class TestOLSRobustCluster2G(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('cluster', groups=(self.groups, self.time), use_correction=True, use_t=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster_2groups(self.res1, self.groups, group2=self.time, use_correction=True)[0] se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_cluster_2groups_small self.rtol = 
0.35 # only f_pvalue and confint for constant differ >rtol=0.05 self.rtolh = 1e-10 class TestOLSRobustCluster2GLarge(CheckOLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('cluster', groups=(self.groups, self.time), use_correction=False, #True, use_t=False) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster_2groups(self.res1, self.groups, group2=self.time, use_correction=False)[0] se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = False self.res2 = res2.results_cluster_2groups_large self.skip_f = True self.rtol = 1e-7 self.rtolh = 1e-10 ###################################### # WLS ###################################### class CheckWLSRobustCluster(CheckOLSRobust): # compare with regress robust @classmethod def setup_class(cls): #import pandas as pa from statsmodels.datasets import grunfeld dtapa = grunfeld.data.load_pandas() #Stata example/data seems to miss last firm dtapa_endog = dtapa.endog[:200] dtapa_exog = dtapa.exog[:200] exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False) #asserts don't work for pandas cls.res1 = WLS(dtapa_endog, exog, weights=1/dtapa_exog['value']).fit() firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'), return_inverse=True) cls.groups = firm_id #time indicator in range(max Ti) time = np.asarray(dtapa_exog[['year']]) time -= time.min() cls.time = np.squeeze(time).astype(int) # nw_panel function requires interval bounds cls.tidx = [(i*20, 20*(i+1)) for i in range(10)] # not available yet for WLS class TestWLSRobustCluster2(CheckWLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('cluster', groups=self.groups, use_correction=True, use_t=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_cluster_wls_small self.rtol = 1e-6 self.rtolh = 1e-10 # not available yet for WLS class TestWLSRobustCluster2Large(CheckWLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('cluster', groups=self.groups, use_correction=False, use_t=False, df_correction=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False) se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = False self.res2 = res2.results_cluster_wls_large self.skip_f = True self.rtol = 1e-6 self.rtolh = 1e-10 class TestWLSRobustSmall(CheckWLSRobustCluster, CheckOLSRobustNewMixin): # compare with `reg cluster` def setup(self): res_ols = self.res1.get_robustcov_results('HC1', use_t=True) self.res3 = self.res1 self.res1 = res_ols self.bse_robust = res_ols.bse self.cov_robust = res_ols.cov_params() #TODO: check standalone function #cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False) cov1 = res_ols.cov_HC1 se1 = sw.se_cov(cov1) self.bse_robust2 = se1 self.cov_robust2 = cov1 self.small = True self.res2 = res2.results_hc1_wls_small self.skip_f = True self.rtol = 1e-6 self.rtolh = 1e-10 class TestWLSOLSRobustSmall(object): @classmethod def 
setup_class(cls): #import pandas as pa from statsmodels.datasets import grunfeld dtapa = grunfeld.data.load_pandas() #Stata example/data seems to miss last firm dtapa_endog = dtapa.endog[:200] dtapa_exog = dtapa.exog[:200] exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False) #asserts don't work for pandas cls.res_wls = WLS(dtapa_endog, exog, weights=1/dtapa_exog['value']).fit() w_sqrt = 1 / np.sqrt(np.asarray(dtapa_exog['value'])) cls.res_ols = OLS(dtapa_endog * w_sqrt, np.asarray(exog) * w_sqrt[:, None]).fit() # hasconst=True ? firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'), return_inverse=True) cls.groups = firm_id #time indicator in range(max Ti) time = np.asarray(dtapa_exog[['year']]) time -= time.min() cls.time = np.squeeze(time).astype(int) # nw_panel function requires interval bounds cls.tidx = [(i*20, 20*(i+1)) for i in range(10)] def test_all(self): all_cov = [('HC0', dict(use_t=True)), ('HC1', dict(use_t=True)), ('HC2', dict(use_t=True)), ('HC3', dict(use_t=True))] # fvalue are not the same, see #1212 #res_ols = self.res_ols #res_wls = self.res_wls #assert_allclose(res_ols.fvalue, res_wls.fvalue, rtol=1e-13) #assert_allclose(res_ols.f_pvalue, res_wls.f_pvalue, rtol=1e-13) for cov_type, kwds in all_cov: res1 = self.res_ols.get_robustcov_results(cov_type, **kwds) res2 = self.res_wls.get_robustcov_results(cov_type, **kwds) assert_allclose(res1.params, res2.params, rtol=1e-13) assert_allclose(res1.cov_params(), res2.cov_params(), rtol=1e-13) assert_allclose(res1.bse, res2.bse, rtol=1e-13) assert_allclose(res1.pvalues, res2.pvalues, rtol=1e-13) #Note: Fvalue doesn't match up, difference in calculation ? # The only difference should be in the constant detection #assert_allclose(res1.fvalue, res2.fvalue, rtol=1e-13) #assert_allclose(res1.f_pvalue, res2.f_pvalue, rtol=1e-13) mat = np.eye(len(res1.params)) ft1 = res1.f_test(mat) ft2 = res2.f_test(mat) assert_allclose(ft1.fvalue, ft2.fvalue, rtol=1e-13) assert_allclose(ft1.pvalue, ft2.pvalue, rtol=1e-12) def test_fixed_scale(self): cov_type = 'fixed_scale' kwds = {} res1 = self.res_ols.get_robustcov_results(cov_type, **kwds) res2 = self.res_wls.get_robustcov_results(cov_type, **kwds) assert_allclose(res1.params, res2.params, rtol=1e-13) assert_allclose(res1.cov_params(), res2.cov_params(), rtol=1e-13) assert_allclose(res1.bse, res2.bse, rtol=1e-13) assert_allclose(res1.pvalues, res2.pvalues, rtol=1e-12) tt = res2.t_test(np.eye(len(res2.params)), cov_p=res2.normalized_cov_params) assert_allclose(res2.cov_params(), res2.normalized_cov_params, rtol=1e-13) assert_allclose(res2.bse, tt.sd, rtol=1e-13) assert_allclose(res2.pvalues, tt.pvalue, rtol=1e-13) assert_allclose(res2.tvalues, tt.tvalue, rtol=1e-13) # using cov_type in fit mod = self.res_wls.model mod3 = WLS(mod.endog, mod.exog, weights=mod.weights) res3 = mod3.fit(cov_type=cov_type, cov_kwds=kwds) tt = res3.t_test(np.eye(len(res3.params)), cov_p=res3.normalized_cov_params) assert_allclose(res3.cov_params(), res3.normalized_cov_params, rtol=1e-13) assert_allclose(res3.bse, tt.sd, rtol=1e-13) assert_allclose(res3.pvalues, tt.pvalue, rtol=1e-13) assert_allclose(res3.tvalues, tt.tvalue, rtol=1e-13) def test_cov_type_fixed_scale(): # this is a unit test from scipy curvefit for `absolute_sigma` keyword xdata = np.array([0, 1, 2, 3, 4, 5]) ydata = np.array([1, 1, 5, 7, 8, 12]) sigma = np.array([1, 2, 1, 2, 1, 2]) xdata = np.column_stack((xdata, np.ones(len(xdata)))) weights = 1. 
/ sigma**2

    res = WLS(ydata, xdata, weights=weights).fit()
    assert_allclose(res.bse, [0.20659803, 0.57204404], rtol=1e-3)

    res = WLS(ydata, xdata, weights=weights).fit()
    assert_allclose(res.bse, [0.20659803, 0.57204404], rtol=1e-3)

    res = WLS(ydata, xdata, weights=weights).fit(cov_type='fixed scale')
    assert_allclose(res.bse, [0.30714756, 0.85045308], rtol=1e-3)

    res = WLS(ydata, xdata, weights=weights / 9.).fit(cov_type='fixed scale')
    assert_allclose(res.bse, [3*0.30714756, 3*0.85045308], rtol=1e-3)

    res = WLS(ydata, xdata, weights=weights).fit(cov_type='fixed scale',
                                                 cov_kwds={'scale': 9})
    assert_allclose(res.bse, [3*0.30714756, 3*0.85045308], rtol=1e-3)
bsd-3-clause
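The pattern these tests exercise, in miniature (a hedged sketch: the data here are synthetic rather than the Grunfeld/macro datasets used above):

import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
x = sm.add_constant(rng.standard_normal((100, 2)))
y = x @ np.array([1.0, 2.0, -1.0]) + rng.standard_normal(100)
groups = np.repeat(np.arange(10), 10)

# Heteroskedasticity-robust and cluster-robust covariances via fit(cov_type=...).
res_hc1 = sm.OLS(y, x).fit(cov_type='HC1')
res_clu = sm.OLS(y, x).fit(cov_type='cluster', cov_kwds={'groups': groups})
print(res_hc1.bse)
print(res_clu.bse)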
nolanliou/tensorflow
tensorflow/examples/learn/multiple_gpu.py
39
3957
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of using Estimator with multiple GPUs to distribute one model. This example only runs if you have multiple GPUs to assign to. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from sklearn import datasets from sklearn import metrics from sklearn import model_selection import tensorflow as tf X_FEATURE = 'x' # Name of the input feature. def my_model(features, labels, mode): """DNN with three hidden layers, and dropout of 0.1 probability. Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and CUDNN 6.5 V2 from NVIDIA need to be installed beforehand. Args: features: Dict of input `Tensor`. labels: Label `Tensor`. mode: One of `ModeKeys`. Returns: `EstimatorSpec`. """ # Create three fully connected layers respectively of size 10, 20, and 10 with # each layer having a dropout probability of 0.1. net = features[X_FEATURE] with tf.device('/device:GPU:1'): for units in [10, 20, 10]: net = tf.layers.dense(net, units=units, activation=tf.nn.relu) net = tf.layers.dropout(net, rate=0.1) with tf.device('/device:GPU:2'): # Compute logits (1 per class). logits = tf.layers.dense(net, 3, activation=None) # Compute predictions. predicted_classes = tf.argmax(logits, 1) if mode == tf.estimator.ModeKeys.PREDICT: predictions = { 'class': predicted_classes, 'prob': tf.nn.softmax(logits) } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Compute loss. loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) # Create training op. if mode == tf.estimator.ModeKeys.TRAIN: optimizer = tf.train.AdagradOptimizer(learning_rate=0.1) train_op = optimizer.minimize( loss, global_step=tf.train.get_global_step()) return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) # Compute evaluation metrics. eval_metric_ops = { 'accuracy': tf.metrics.accuracy( labels=labels, predictions=predicted_classes) } return tf.estimator.EstimatorSpec( mode, loss=loss, eval_metric_ops=eval_metric_ops) def main(unused_argv): iris = datasets.load_iris() x_train, x_test, y_train, y_test = model_selection.train_test_split( iris.data, iris.target, test_size=0.2, random_state=42) classifier = tf.estimator.Estimator(model_fn=my_model) # Train. train_input_fn = tf.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True) classifier.train(input_fn=train_input_fn, steps=100) # Predict. test_input_fn = tf.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False) predictions = classifier.predict(input_fn=test_input_fn) y_predicted = np.array(list(p['class'] for p in predictions)) y_predicted = y_predicted.reshape(np.array(y_test).shape) # Score with sklearn. score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy (sklearn): {0:f}'.format(score)) # Score with tensorflow. 
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  tf.app.run()
apache-2.0
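Since the example above hard-codes '/device:GPU:1' and '/device:GPU:2', a quick way to check which GPU devices are actually visible before training (a sketch using the same TF 1.x-era API as the file):

from tensorflow.python.client import device_lib

def available_gpus():
    # List local devices and keep only GPU entries, e.g. '/device:GPU:0'.
    return [d.name for d in device_lib.list_local_devices()
            if d.device_type == 'GPU']

print(available_gpus())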
piiswrong/mxnet
python/mxnet/model.py
13
41314
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines # pylint: disable=too-many-branches, too-many-statements """MXNet model module""" from __future__ import absolute_import, print_function import os import time import logging import warnings from collections import namedtuple import numpy as np from . import io from . import nd from . import symbol as sym from . import optimizer as opt from . import metric from . import kvstore as kvs from .context import Context, cpu from .initializer import Uniform from .optimizer import get_updater from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data from .io import DataDesc from .base import mx_real_t BASE_ESTIMATOR = object try: from sklearn.base import BaseEstimator BASE_ESTIMATOR = BaseEstimator except ImportError: SKLEARN_INSTALLED = False # Parameter to pass to batch_end_callback BatchEndParam = namedtuple('BatchEndParams', ['epoch', 'nbatch', 'eval_metric', 'locals']) def _create_kvstore(kvstore, num_device, arg_params): """Create kvstore This function select and create a proper kvstore if given the kvstore type. Parameters ---------- kvstore : KVStore or str The kvstore. num_device : int The number of devices arg_params : dict of str to `NDArray`. Model parameter, dict of name to `NDArray` of net's weights. 
""" update_on_kvstore = True if kvstore is None: kv = None elif isinstance(kvstore, kvs.KVStore): kv = kvstore elif isinstance(kvstore, str): # create kvstore using the string type if num_device is 1 and 'dist' not in kvstore: # no need to use kv for single device and single machine kv = None else: kv = kvs.create(kvstore) if kvstore == 'local': # automatically select a proper local max_size = max(np.prod(param.shape) for param in arg_params.values()) if max_size > 1024 * 1024 * 16: update_on_kvstore = False else: raise TypeError('kvstore must be KVStore, str or None') if kv is None: update_on_kvstore = False return (kv, update_on_kvstore) def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore): """Initialize kvstore""" for idx, param_on_devs in enumerate(param_arrays): name = param_names[idx] kvstore.init(name, arg_params[name]) if update_on_kvstore: kvstore.pull(name, param_on_devs, priority=-idx) def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names): """Perform update of param_arrays from grad_arrays on NCCL kvstore.""" valid_indices = [index for index, grad_list in enumerate(grad_arrays) if grad_list[0] is not None] valid_grad_arrays = [grad_arrays[i] for i in valid_indices] valid_param_arrays = [param_arrays[i] for i in valid_indices] valid_param_names = [param_names[i] for i in valid_indices] size = len(valid_grad_arrays) start = 0 # Use aggregation by default only with NCCL default_batch = 16 batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch)) while start < size: end = start + batch if start + batch < size else size # push gradient, priority is negative index kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start) # pull back the weights kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start) start = end def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names): """Perform update of param_arrays from grad_arrays on kvstore.""" for index, pair in enumerate(zip(param_arrays, grad_arrays)): arg_list, grad_list = pair if grad_list[0] is None: continue name = param_names[index] # push gradient, priority is negative index kvstore.push(name, grad_list, priority=-index) # pull back the weights kvstore.pull(name, arg_list, priority=-index) def _update_params(param_arrays, grad_arrays, updater, num_device, kvstore=None, param_names=None): """Perform update of param_arrays from grad_arrays not on kvstore.""" for i, pair in enumerate(zip(param_arrays, grad_arrays)): arg_list, grad_list = pair if grad_list[0] is None: continue index = i if kvstore: name = param_names[index] # push gradient, priority is negative index kvstore.push(name, grad_list, priority=-index) # pull back the sum gradients, to the same locations. kvstore.pull(name, grad_list, priority=-index) for k, p in enumerate(zip(arg_list, grad_list)): # faked an index here, to make optimizer create diff # state for the same index but on diff devs, TODO(mli) # use a better solution later w, g = p updater(index*num_device+k, g, w) def _multiple_callbacks(callbacks, *args, **kwargs): """Sends args and kwargs to any configured callbacks. This handles the cases where the 'callbacks' variable is ``None``, a single function, or a list. 
""" if isinstance(callbacks, list): for cb in callbacks: cb(*args, **kwargs) return if callbacks: callbacks(*args, **kwargs) def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names, arg_params, aux_params, begin_epoch, end_epoch, epoch_size, optimizer, kvstore, update_on_kvstore, train_data, eval_data=None, eval_metric=None, epoch_end_callback=None, batch_end_callback=None, logger=None, work_load_list=None, monitor=None, eval_end_callback=None, eval_batch_end_callback=None, sym_gen=None): """Internal training function on multiple devices. This function will also work for single device as well. Parameters ---------- symbol : Symbol The network configuration. ctx : list of Context The training devices. arg_names: list of str Name of all arguments of the network. param_names: list of str Name of all trainable parameters of the network. aux_names: list of str Name of all auxiliary states of the network. arg_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's weights. aux_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's auxiliary states. begin_epoch : int The begining training epoch. end_epoch : int The end training epoch. epoch_size : int, optional Number of batches in a epoch. In default, it is set to ``ceil(num_train_examples / batch_size)``. optimizer : Optimizer The optimization algorithm train_data : DataIter Training data iterator. eval_data : DataIter Validation data iterator. eval_metric : EvalMetric An evaluation function or a list of evaluation functions. epoch_end_callback : callable(epoch, symbol, arg_params, aux_states) A callback that is invoked at end of each epoch. This can be used to checkpoint model each epoch. batch_end_callback : callable(BatchEndParams) A callback that is invoked at end of each batch. This can be used to measure speed, get result from evaluation metric. etc. kvstore : KVStore The KVStore. update_on_kvstore : bool Whether or not perform weight updating on kvstore. logger : logging logger When not specified, default logger will be used. work_load_list : list of float or int, optional The list of work load for different devices, in the same order as ``ctx``. monitor : Monitor, optional Monitor installed to executor, for monitoring outputs, weights, and gradients for debugging. Notes ----- - This function will inplace update the NDArrays in `arg_params` and `aux_states`. """ if logger is None: logger = logging executor_manager = DataParallelExecutorManager(symbol=symbol, sym_gen=sym_gen, ctx=ctx, train_data=train_data, param_names=param_names, arg_names=arg_names, aux_names=aux_names, work_load_list=work_load_list, logger=logger) if monitor: executor_manager.install_monitor(monitor) executor_manager.set_params(arg_params, aux_params) if not update_on_kvstore: updater = get_updater(optimizer) if kvstore: _initialize_kvstore(kvstore=kvstore, param_arrays=executor_manager.param_arrays, arg_params=arg_params, param_names=executor_manager.param_names, update_on_kvstore=update_on_kvstore) if update_on_kvstore: kvstore.set_optimizer(optimizer) # Now start training train_data.reset() for epoch in range(begin_epoch, end_epoch): # Training phase tic = time.time() eval_metric.reset() nbatch = 0 # Iterate over training data. 
while True: do_reset = True for data_batch in train_data: executor_manager.load_data_batch(data_batch) if monitor is not None: monitor.tic() executor_manager.forward(is_train=True) executor_manager.backward() if update_on_kvstore: if 'nccl' in kvstore.type: _update_params_on_kvstore_nccl(executor_manager.param_arrays, executor_manager.grad_arrays, kvstore, executor_manager.param_names) else: _update_params_on_kvstore(executor_manager.param_arrays, executor_manager.grad_arrays, kvstore, executor_manager.param_names) else: _update_params(executor_manager.param_arrays, executor_manager.grad_arrays, updater=updater, num_device=len(ctx), kvstore=kvstore, param_names=executor_manager.param_names) if monitor is not None: monitor.toc_print() # evaluate at end, so we can lazy copy executor_manager.update_metric(eval_metric, data_batch.label) nbatch += 1 # batch callback (for print purpose) if batch_end_callback is not None: batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch, eval_metric=eval_metric, locals=locals()) _multiple_callbacks(batch_end_callback, batch_end_params) # this epoch is done possibly earlier if epoch_size is not None and nbatch >= epoch_size: do_reset = False break if do_reset: logger.info('Epoch[%d] Resetting Data Iterator', epoch) train_data.reset() # this epoch is done if epoch_size is None or nbatch >= epoch_size: break toc = time.time() logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic)) if epoch_end_callback or epoch + 1 == end_epoch: executor_manager.copy_to(arg_params, aux_params) _multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params) # evaluation if eval_data: eval_metric.reset() eval_data.reset() total_num_batch = 0 for i, eval_batch in enumerate(eval_data): executor_manager.load_data_batch(eval_batch) executor_manager.forward(is_train=False) executor_manager.update_metric(eval_metric, eval_batch.label) if eval_batch_end_callback is not None: batch_end_params = BatchEndParam(epoch=epoch, nbatch=i, eval_metric=eval_metric, locals=locals()) _multiple_callbacks(eval_batch_end_callback, batch_end_params) total_num_batch += 1 if eval_end_callback is not None: eval_end_params = BatchEndParam(epoch=epoch, nbatch=total_num_batch, eval_metric=eval_metric, locals=locals()) _multiple_callbacks(eval_end_callback, eval_end_params) eval_data.reset() # end of all epochs return def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params): """Checkpoint the model data into file. Parameters ---------- prefix : str Prefix of model name. epoch : int The epoch number of the model. symbol : Symbol The input Symbol. arg_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's weights. aux_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's auxiliary states. Notes ----- - ``prefix-symbol.json`` will be saved for symbol. - ``prefix-epoch.params`` will be saved for parameters. """ if symbol is not None: symbol.save('%s-symbol.json' % prefix) save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()} save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()}) param_name = '%s-%04d.params' % (prefix, epoch) nd.save(param_name, save_dict) logging.info('Saved checkpoint to \"%s\"', param_name) def load_checkpoint(prefix, epoch): """Load model checkpoint from file. Parameters ---------- prefix : str Prefix of model name. epoch : int Epoch number of model we would like to load. 
Returns ------- symbol : Symbol The symbol configuration of computation network. arg_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's weights. aux_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's auxiliary states. Notes ----- - Symbol will be loaded from ``prefix-symbol.json``. - Parameters will be loaded from ``prefix-epoch.params``. """ symbol = sym.load('%s-symbol.json' % prefix) save_dict = nd.load('%s-%04d.params' % (prefix, epoch)) arg_params = {} aux_params = {} for k, v in save_dict.items(): tp, name = k.split(':', 1) if tp == 'arg': arg_params[name] = v if tp == 'aux': aux_params[name] = v return (symbol, arg_params, aux_params) from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position class FeedForward(BASE_ESTIMATOR): """Model class of MXNet for training and predicting feedforward nets. This class is designed for a single-data single output supervised network. Parameters ---------- symbol : Symbol The symbol configuration of computation network. ctx : Context or list of Context, optional The device context of training and prediction. To use multi GPU training, pass in a list of gpu contexts. num_epoch : int, optional Training parameter, number of training epochs(epochs). epoch_size : int, optional Number of batches in a epoch. In default, it is set to ``ceil(num_train_examples / batch_size)``. optimizer : str or Optimizer, optional Training parameter, name or optimizer object for training. initializer : initializer function, optional Training parameter, the initialization scheme used. numpy_batch_size : int, optional The batch size of training data. Only needed when input array is numpy. arg_params : dict of str to NDArray, optional Model parameter, dict of name to NDArray of net's weights. aux_params : dict of str to NDArray, optional Model parameter, dict of name to NDArray of net's auxiliary states. allow_extra_params : boolean, optional Whether allow extra parameters that are not needed by symbol to be passed by aux_params and ``arg_params``. If this is True, no error will be thrown when ``aux_params`` and ``arg_params`` contain more parameters than needed. begin_epoch : int, optional The begining training epoch. kwargs : dict The additional keyword arguments passed to optimizer. """ def __init__(self, symbol, ctx=None, num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01), numpy_batch_size=128, arg_params=None, aux_params=None, allow_extra_params=False, begin_epoch=0, **kwargs): warnings.warn( '\033[91mmxnet.model.FeedForward has been deprecated. 
' + \ 'Please use mxnet.mod.Module instead.\033[0m', DeprecationWarning, stacklevel=2) if isinstance(symbol, sym.Symbol): self.symbol = symbol self.sym_gen = None else: assert(callable(symbol)) self.symbol = None self.sym_gen = symbol # model parameters self.arg_params = arg_params self.aux_params = aux_params self.allow_extra_params = allow_extra_params self.argument_checked = False if self.sym_gen is None: self._check_arguments() # basic configuration if ctx is None: ctx = [cpu()] elif isinstance(ctx, Context): ctx = [ctx] self.ctx = ctx # training parameters self.num_epoch = num_epoch self.epoch_size = epoch_size self.kwargs = kwargs.copy() self.optimizer = optimizer self.initializer = initializer self.numpy_batch_size = numpy_batch_size # internal helper state self._pred_exec = None self.begin_epoch = begin_epoch def _check_arguments(self): """verify the argument of the default symbol and user provided parameters""" if self.argument_checked: return assert(self.symbol is not None) self.argument_checked = True # check if symbol contain duplicated names. _check_arguments(self.symbol) # rematch parameters to delete useless ones if self.allow_extra_params: if self.arg_params: arg_names = set(self.symbol.list_arguments()) self.arg_params = {k : v for k, v in self.arg_params.items() if k in arg_names} if self.aux_params: aux_names = set(self.symbol.list_auxiliary_states()) self.aux_params = {k : v for k, v in self.aux_params.items() if k in aux_names} @staticmethod def _is_data_arg(name): """Check if name is a data argument.""" return name.endswith('data') or name.endswith('label') def _init_params(self, inputs, overwrite=False): """Initialize weight parameters and auxiliary states.""" inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs] input_shapes = {item.name: item.shape for item in inputs} arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes) assert arg_shapes is not None input_dtypes = {item.name: item.dtype for item in inputs} arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes) assert arg_dtypes is not None arg_names = self.symbol.list_arguments() input_names = input_shapes.keys() param_names = [key for key in arg_names if key not in input_names] aux_names = self.symbol.list_auxiliary_states() param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes) if x[0] in param_names] arg_params = {k : nd.zeros(shape=s, dtype=t) for k, s, t in param_name_attrs} aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes) if x[0] in aux_names] aux_params = {k : nd.zeros(shape=s, dtype=t) for k, s, t in aux_name_attrs} for k, v in arg_params.items(): if self.arg_params and k in self.arg_params and (not overwrite): arg_params[k][:] = self.arg_params[k][:] else: self.initializer(k, v) for k, v in aux_params.items(): if self.aux_params and k in self.aux_params and (not overwrite): aux_params[k][:] = self.aux_params[k][:] else: self.initializer(k, v) self.arg_params = arg_params self.aux_params = aux_params return (arg_names, list(param_names), aux_names) def __getstate__(self): this = self.__dict__.copy() this['_pred_exec'] = None return this def __setstate__(self, state): self.__dict__.update(state) def _init_predictor(self, input_shapes, type_dict=None): """Initialize the predictor module for running prediction.""" if self._pred_exec is not None: arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes)) assert arg_shapes is not None, "Incomplete input shapes" pred_shapes = [x.shape for x in self._pred_exec.arg_arrays] if 
arg_shapes == pred_shapes: return # for now only use the first device pred_exec = self.symbol.simple_bind( self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes)) pred_exec.copy_params_from(self.arg_params, self.aux_params) _check_arguments(self.symbol) self._pred_exec = pred_exec def _init_iter(self, X, y, is_train): """Initialize the iterator given input.""" if isinstance(X, (np.ndarray, nd.NDArray)): if y is None: if is_train: raise ValueError('y must be specified when X is numpy.ndarray') else: y = np.zeros(X.shape[0]) if not isinstance(y, (np.ndarray, nd.NDArray)): raise TypeError('y must be ndarray when X is numpy.ndarray') if X.shape[0] != y.shape[0]: raise ValueError("The numbers of data points and labels not equal") if y.ndim == 2 and y.shape[1] == 1: y = y.flatten() if y.ndim != 1: raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)") if is_train: return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=is_train, last_batch_handle='roll_over') else: return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False) if not isinstance(X, io.DataIter): raise TypeError('X must be DataIter, NDArray or numpy.ndarray') return X def _init_eval_iter(self, eval_data): """Initialize the iterator given eval_data.""" if eval_data is None: return eval_data if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2: if eval_data[0] is not None: if eval_data[1] is None and isinstance(eval_data[0], io.DataIter): return eval_data[0] input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list) else eval_data[0]) input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list) else eval_data[1]) return self._init_iter(input_data, input_label, is_train=True) else: raise ValueError("Eval data is NONE") if not isinstance(eval_data, io.DataIter): raise TypeError('Eval data must be DataIter, or ' \ 'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)') return eval_data def predict(self, X, num_batch=None, return_data=False, reset=True): """Run the prediction, always only use one device. Parameters ---------- X : mxnet.DataIter num_batch : int or None The number of batch to run. Go though all batches if ``None``. Returns ------- y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs. The predicted value of the output. 
""" X = self._init_iter(X, None, is_train=False) if reset: X.reset() data_shapes = X.provide_data data_names = [x[0] for x in data_shapes] type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items()) for x in X.provide_data: if isinstance(x, DataDesc): type_dict[x.name] = x.dtype else: type_dict[x[0]] = mx_real_t self._init_predictor(data_shapes, type_dict) batch_size = X.batch_size data_arrays = [self._pred_exec.arg_dict[name] for name in data_names] output_list = [[] for _ in range(len(self._pred_exec.outputs))] if return_data: data_list = [[] for _ in X.provide_data] label_list = [[] for _ in X.provide_label] i = 0 for batch in X: _load_data(batch, data_arrays) self._pred_exec.forward(is_train=False) padded = batch.pad real_size = batch_size - padded for o_list, o_nd in zip(output_list, self._pred_exec.outputs): o_list.append(o_nd[0:real_size].asnumpy()) if return_data: for j, x in enumerate(batch.data): data_list[j].append(x[0:real_size].asnumpy()) for j, x in enumerate(batch.label): label_list[j].append(x[0:real_size].asnumpy()) i += 1 if num_batch is not None and i == num_batch: break outputs = [np.concatenate(x) for x in output_list] if len(outputs) == 1: outputs = outputs[0] if return_data: data = [np.concatenate(x) for x in data_list] label = [np.concatenate(x) for x in label_list] if len(data) == 1: data = data[0] if len(label) == 1: label = label[0] return outputs, data, label else: return outputs def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True): """Run the model given an input and calculate the score as assessed by an evaluation metric. Parameters ---------- X : mxnet.DataIter eval_metric : metric.metric The metric for calculating score. num_batch : int or None The number of batches to run. Go though all batches if ``None``. Returns ------- s : float The final score. """ # setup metric if not isinstance(eval_metric, metric.EvalMetric): eval_metric = metric.create(eval_metric) X = self._init_iter(X, None, is_train=False) if reset: X.reset() data_shapes = X.provide_data data_names = [x[0] for x in data_shapes] type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items()) for x in X.provide_data: if isinstance(x, DataDesc): type_dict[x.name] = x.dtype else: type_dict[x[0]] = mx_real_t self._init_predictor(data_shapes, type_dict) data_arrays = [self._pred_exec.arg_dict[name] for name in data_names] for i, batch in enumerate(X): if num_batch is not None and i == num_batch: break _load_data(batch, data_arrays) self._pred_exec.forward(is_train=False) eval_metric.update(batch.label, self._pred_exec.outputs) if batch_end_callback is not None: batch_end_params = BatchEndParam(epoch=0, nbatch=i, eval_metric=eval_metric, locals=locals()) _multiple_callbacks(batch_end_callback, batch_end_params) return eval_metric.get()[1] def fit(self, X, y=None, eval_data=None, eval_metric='acc', epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None, work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(), eval_batch_end_callback=None): """Fit the model. Parameters ---------- X : DataIter, or numpy.ndarray/NDArray Training data. If `X` is a `DataIter`, the name or (if name not available) the position of its outputs should match the corresponding variable names defined in the symbolic graph. y : numpy.ndarray/NDArray, optional Training set label. If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set. 
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be the same as `X`, i.e. the number of data points and labels should be equal. eval_data : DataIter or numpy.ndarray/list/NDArray pair If eval_data is numpy.ndarray/list/NDArray pair, it should be ``(valid_data, valid_label)``. eval_metric : metric.EvalMetric or str or callable The evaluation metric. This could be the name of evaluation metric or a custom evaluation function that returns statistics based on a minibatch. epoch_end_callback : callable(epoch, symbol, arg_params, aux_states) A callback that is invoked at end of each epoch. This can be used to checkpoint model each epoch. batch_end_callback: callable(epoch) A callback that is invoked at end of each batch for purposes of printing. kvstore: KVStore or str, optional The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async' In default uses 'local', often no need to change for single machiine. logger : logging logger, optional When not specified, default logger will be used. work_load_list : float or int, optional The list of work load for different devices, in the same order as `ctx`. Note ---- KVStore behavior - 'local', multi-devices on a single machine, will automatically choose best type. - 'dist_sync', multiple machines communicating via BSP. - 'dist_async', multiple machines with asynchronous communication. """ data = self._init_iter(X, y, is_train=True) eval_data = self._init_eval_iter(eval_data) if self.sym_gen: self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member self._check_arguments() self.kwargs["sym"] = self.symbol arg_names, param_names, aux_names = \ self._init_params(data.provide_data+data.provide_label) # setup metric if not isinstance(eval_metric, metric.EvalMetric): eval_metric = metric.create(eval_metric) # create kvstore (kvstore, update_on_kvstore) = _create_kvstore( kvstore, len(self.ctx), self.arg_params) param_idx2name = {} if update_on_kvstore: param_idx2name.update(enumerate(param_names)) else: for i, n in enumerate(param_names): for k in range(len(self.ctx)): param_idx2name[i*len(self.ctx)+k] = n self.kwargs["param_idx2name"] = param_idx2name # init optmizer if isinstance(self.optimizer, str): batch_size = data.batch_size if kvstore and 'dist' in kvstore.type and not '_async' in kvstore.type: batch_size *= kvstore.num_workers optimizer = opt.create(self.optimizer, rescale_grad=(1.0/batch_size), **(self.kwargs)) elif isinstance(self.optimizer, opt.Optimizer): optimizer = self.optimizer # do training _train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names, self.arg_params, self.aux_params, begin_epoch=self.begin_epoch, end_epoch=self.num_epoch, epoch_size=self.epoch_size, optimizer=optimizer, train_data=data, eval_data=eval_data, eval_metric=eval_metric, epoch_end_callback=epoch_end_callback, batch_end_callback=batch_end_callback, kvstore=kvstore, update_on_kvstore=update_on_kvstore, logger=logger, work_load_list=work_load_list, monitor=monitor, eval_end_callback=eval_end_callback, eval_batch_end_callback=eval_batch_end_callback, sym_gen=self.sym_gen) def save(self, prefix, epoch=None): """Checkpoint the model checkpoint into file. You can also use `pickle` to do the job if you only work on Python. The advantage of `load` and `save` (as compared to `pickle`) is that the resulting file can be loaded from other MXNet language bindings. One can also directly `load`/`save` from/to cloud storage(S3, HDFS) Parameters ---------- prefix : str Prefix of model name. 
Notes ----- - ``prefix-symbol.json`` will be saved for symbol. - ``prefix-epoch.params`` will be saved for parameters. """ if epoch is None: epoch = self.num_epoch assert epoch is not None save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params) @staticmethod def load(prefix, epoch, ctx=None, **kwargs): """Load model checkpoint from file. Parameters ---------- prefix : str Prefix of model name. epoch : int epoch number of model we would like to load. ctx : Context or list of Context, optional The device context of training and prediction. kwargs : dict Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`. Returns ------- model : FeedForward The loaded model that can be used for prediction. Notes ----- - ``prefix-symbol.json`` will be saved for symbol. - ``prefix-epoch.params`` will be saved for parameters. """ symbol, arg_params, aux_params = load_checkpoint(prefix, epoch) return FeedForward(symbol, ctx=ctx, arg_params=arg_params, aux_params=aux_params, begin_epoch=epoch, **kwargs) @staticmethod def create(symbol, X, y=None, ctx=None, num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01), eval_data=None, eval_metric='acc', epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None, work_load_list=None, eval_end_callback=LogValidationMetricsCallback(), eval_batch_end_callback=None, **kwargs): """Functional style to create a model. This function is more consistent with functional languages such as R, where mutation is not allowed. Parameters ---------- symbol : Symbol The symbol configuration of a computation network. X : DataIter Training data. y : numpy.ndarray, optional If `X` is a ``numpy.ndarray``, `y` must be set. ctx : Context or list of Context, optional The device context of training and prediction. To use multi-GPU training, pass in a list of GPU contexts. num_epoch : int, optional The number of training epochs(epochs). epoch_size : int, optional Number of batches in a epoch. In default, it is set to ``ceil(num_train_examples / batch_size)``. optimizer : str or Optimizer, optional The name of the chosen optimizer, or an optimizer object, used for training. initializer : initializer function, optional The initialization scheme used. eval_data : DataIter or numpy.ndarray pair If `eval_set` is ``numpy.ndarray`` pair, it should be (`valid_data`, `valid_label`). eval_metric : metric.EvalMetric or str or callable The evaluation metric. Can be the name of an evaluation metric or a custom evaluation function that returns statistics based on a minibatch. epoch_end_callback : callable(epoch, symbol, arg_params, aux_states) A callback that is invoked at end of each epoch. This can be used to checkpoint model each epoch. batch_end_callback: callable(epoch) A callback that is invoked at end of each batch for print purposes. kvstore: KVStore or str, optional The KVStore or a string kvstore type: 'local', 'dist_sync', 'dis_async'. Defaults to 'local', often no need to change for single machine. logger : logging logger, optional When not specified, default logger will be used. work_load_list : list of float or int, optional The list of work load for different devices, in the same order as `ctx`. 
""" model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch, epoch_size=epoch_size, optimizer=optimizer, initializer=initializer, **kwargs) model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric, epoch_end_callback=epoch_end_callback, batch_end_callback=batch_end_callback, kvstore=kvstore, logger=logger, work_load_list=work_load_list, eval_end_callback=eval_end_callback, eval_batch_end_callback=eval_batch_end_callback) return model
apache-2.0
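A minimal usage sketch for the deprecated FeedForward API defined in the record above; it is not part of the original module, and the toy symbol, array shapes, and checkpoint prefix are illustrative assumptions.

# Hypothetical example: train, checkpoint, reload and predict with FeedForward.
import numpy as np
import mxnet as mx

# toy two-class data (assumed shapes, not from the original file)
X = np.random.rand(100, 10).astype('float32')
y = (X.sum(axis=1) > 5).astype('float32')

# a tiny MLP symbol; the default NDArrayIter label name 'softmax_label'
# matches the SoftmaxOutput named 'softmax'
data = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data, num_hidden=16, name='fc1')
net = mx.sym.Activation(net, act_type='relu')
net = mx.sym.FullyConnected(net, num_hidden=2, name='fc2')
net = mx.sym.SoftmaxOutput(net, name='softmax')

model = mx.model.FeedForward(net, ctx=mx.cpu(), num_epoch=5,
                             optimizer='sgd', numpy_batch_size=32)
model.fit(X, y)               # numpy inputs are wrapped in an NDArrayIter internally
prob = model.predict(X)       # (100, 2) class probabilities
model.save('toy-mlp')         # writes toy-mlp-symbol.json and toy-mlp-0005.params
restored = mx.model.FeedForward.load('toy-mlp', epoch=5)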
carthach/essentia
src/examples/python/musicbricks-tutorials/1-stft_analsynth.py
2
1197
import essentia
import essentia.streaming as es

# import matplotlib for plotting
import matplotlib.pyplot as plt
import numpy as np
import sys

# algorithm parameters
framesize = 1024
hopsize = 256

# loop over all frames
audioout = np.array(0)
counter = 0

import matplotlib.pylab as plt

# input and output files
import os.path
tutorial_dir = os.path.dirname(os.path.realpath(__file__))
inputFilename = os.path.join(tutorial_dir, 'singing-female.wav')
outputFilename = os.path.join(tutorial_dir, 'singing-female-out-stft.wav')

out = np.array(0)

loader = es.MonoLoader(filename = inputFilename, sampleRate = 44100)
pool = essentia.Pool()
fcut = es.FrameCutter(frameSize = framesize, hopSize = hopsize, startFromZero = False);
w = es.Windowing(type = "hann");
fft = es.FFT(size = framesize);
ifft = es.IFFT(size = framesize);
overl = es.OverlapAdd (frameSize = framesize, hopSize = hopsize, gain = 1./framesize );
awrite = es.MonoWriter (filename = outputFilename, sampleRate = 44100);

loader.audio >> fcut.signal
fcut.frame >> w.frame
w.frame >> fft.frame
fft.fft >> ifft.fft
ifft.frame >> overl.frame
overl.signal >> awrite.audio
overl.signal >> (pool, 'audio')

essentia.run(loader)
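An optional follow-up sketch, not part of the original tutorial: it assumes only the 'audio' descriptor accumulated in the pool above and uses standard matplotlib calls to inspect the resynthesised signal.

# plot the overlap-add output collected in the pool
resynth = pool['audio']
print('resynthesised %d samples' % len(resynth))
plt.plot(resynth)
plt.title('STFT analysis/synthesis output')
plt.xlabel('sample index')
plt.show()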
agpl-3.0
capitancambio/scikit-neuralnetwork
docs/conf.py
1
1948
# -*- coding: utf-8 -*-
#
# scikit-neuralnetwork documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 31 20:28:10 2015.

import sys
import os

project = u'scikit-neuralnetwork'
copyright = u'2015, scikit-neuralnetwork developers (BSD License)'


# -- Overrides for modules ----------------------------------------------------

from mock import Mock as MagicMock

class Mock(MagicMock):
    @classmethod
    def __getattr__(cls, name):
        if name in ('BaseEstimator', 'RegressorMixin', 'ClassifierMixin'):
            return object
        return Mock()

MOCK_MODULES = ['numpy', 'theano', 'sklearn.cross_validation', 'sklearn.base', 'sklearn.pipeline',
                'sklearn.preprocessing', 'pylearn2.termination_criteria', 'pylearn2.datasets',
                'pylearn2.models', 'pylearn2.training_algorithms', 'pylearn2.costs.mlp.dropout',
                'pylearn2.space', 'pylearn2.training_algorithms.learning_rule']

for fullname in MOCK_MODULES:
    segments = []
    for s in fullname.split('.'):
        segments.append(s)
        mod_name = ".".join(segments)
        sys.modules[mod_name] = Mock()


# -- Configuration of documentation -------------------------------------------

sys.path.append(os.path.dirname(os.path.dirname(__file__)).encode('utf-8'))

import sknn
version = sknn.__version__
release = sknn.__version__

extensions = ['sphinx.ext.autosummary', 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'numpydoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False


# -- Options for HTML output --------------------------------------------------

html_title = 'scikit-neuralnetwork documentation'
# html_logo = 'img/logo.png'
# html_favicon = 'img/favicon.ico'
html_static_path = ['_static']
htmlhelp_basename = 'sknndoc'
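A quick illustrative check of the mocking pattern above, not part of the original conf.py: after the loop has registered the fake modules, the heavy dependencies import as Mock objects, which is what lets autodoc import sknn without them installed.

# sanity check (hypothetical): mocked modules resolve without being installed
import sys
assert 'theano' in sys.modules and 'pylearn2.space' in sys.modules
import theano                    # resolves to the Mock instance registered above
print(type(theano).__name__)     # -> 'Mock'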
bsd-3-clause
jmargeta/scikit-learn
doc/sphinxext/gen_rst.py
4
30674
""" Example generation for the scikit learn Generate the rst files for the examples by iterating over the python example files. Files that generate images should start with 'plot' """ from time import time import os import shutil import traceback import glob import sys from StringIO import StringIO import cPickle import re import urllib2 import gzip import posixpath try: from PIL import Image except: import Image import matplotlib matplotlib.use('Agg') import token import tokenize ############################################################################### # A tee object to redict streams to multiple outputs class Tee(object): def __init__(self, file1, file2): self.file1 = file1 self.file2 = file2 def write(self, data): self.file1.write(data) self.file2.write(data) def flush(self): self.file1.flush() self.file2.flush() ############################################################################### # Documentation link resolver objects def get_data(url): """Helper function to get data over http or from a local file""" if url.startswith('http://'): resp = urllib2.urlopen(url) encoding = resp.headers.dict.get('content-encoding', 'plain') data = resp.read() if encoding == 'plain': pass elif encoding == 'gzip': data = StringIO(data) data = gzip.GzipFile(fileobj=data).read() else: raise RuntimeError('unknown encoding') else: with open(url, 'r') as fid: data = fid.read() fid.close() return data def parse_sphinx_searchindex(searchindex): """Parse a Sphinx search index Parameters ---------- searchindex : str The Sphinx search index (contents of searchindex.js) Returns ------- filenames : list of str The file names parsed from the search index. objects : dict The objects parsed from the search index. """ def _select_block(str_in, start_tag, end_tag): """Select first block delimited by start_tag and end_tag""" start_pos = str_in.find(start_tag) if start_pos < 0: raise ValueError('start_tag not found') depth = 0 for pos in range(start_pos, len(str_in)): if str_in[pos] == start_tag: depth += 1 elif str_in[pos] == end_tag: depth -= 1 if depth == 0: break sel = str_in[start_pos + 1:pos] return sel def _parse_dict_recursive(dict_str): """Parse a dictionary from the search index""" dict_out = dict() pos_last = 0 pos = dict_str.find(':') while pos >= 0: key = dict_str[pos_last:pos] if dict_str[pos + 1] == '[': # value is a list pos_tmp = dict_str.find(']', pos + 1) if pos_tmp < 0: raise RuntimeError('error when parsing dict') value = dict_str[pos + 2: pos_tmp].split(',') # try to convert elements to int for i in range(len(value)): try: value[i] = int(value[i]) except ValueError: pass elif dict_str[pos + 1] == '{': # value is another dictionary subdict_str = _select_block(dict_str[pos:], '{', '}') value = _parse_dict_recursive(subdict_str) pos_tmp = pos + len(subdict_str) else: raise ValueError('error when parsing dict: unknown elem') key = key.strip('"') if len(key) > 0: dict_out[key] = value pos_last = dict_str.find(',', pos_tmp) if pos_last < 0: break pos_last += 1 pos = dict_str.find(':', pos_last) return dict_out # parse objects query = 'objects:' pos = searchindex.find(query) if pos < 0: raise ValueError('"objects:" not found in search index') sel = _select_block(searchindex[pos:], '{', '}') objects = _parse_dict_recursive(sel) # parse filenames query = 'filenames:' pos = searchindex.find(query) if pos < 0: raise ValueError('"filenames:" not found in search index') filenames = searchindex[pos + len(query) + 1:] filenames = filenames[:filenames.find(']')] filenames = [f.strip('"') for f in 
filenames.split(',')] return filenames, objects class SphinxDocLinkResolver(object): """ Resolve documentation links using searchindex.js generated by Sphinx Parameters ---------- doc_url : str The base URL of the project website. searchindex : str Filename of searchindex, relative to doc_url. extra_modules_test : list of str List of extra module names to test. relative : bool Return relative links (only useful for links to documentation of this package). """ def __init__(self, doc_url, searchindex='searchindex.js', extra_modules_test=None, relative=False): self.doc_url = doc_url self.relative = relative self._link_cache = {} self.extra_modules_test = extra_modules_test self._page_cache = {} if doc_url.startswith('http://'): if relative: raise ValueError('Relative links are only supported for local ' 'URLs (doc_url cannot start with "http://)"') searchindex_url = doc_url + '/' + searchindex else: searchindex_url = os.path.join(doc_url, searchindex) # detect if we are using relative links on a Windows system if os.name.lower() == 'nt' and not doc_url.startswith('http://'): if not relative: raise ValueError('You have to use relative=True for the local' 'package on a Windows system.') self._is_windows = True else: self._is_windows = False # download and initialize the search index sindex = get_data(searchindex_url) filenames, objects = parse_sphinx_searchindex(sindex) self._searchindex = dict(filenames=filenames, objects=objects) def _get_link(self, cobj): """Get a valid link, False if not found""" fname_idx = None full_name = cobj['module_short'] + '.' + cobj['name'] if full_name in self._searchindex['objects']: value = self._searchindex['objects'][full_name] if isinstance(value, dict): value = value[value.keys()[0]] fname_idx = value[0] elif cobj['module_short'] in self._searchindex['objects']: value = self._searchindex['objects'][cobj['module_short']] if cobj['name'] in value.keys(): fname_idx = value[cobj['name']][0] if fname_idx is not None: fname = self._searchindex['filenames'][fname_idx] + '.html' if self._is_windows: fname = fname.replace('/', '\\') link = os.path.join(self.doc_url, fname) else: link = posixpath.join(self.doc_url, fname) if link in self._page_cache: html = self._page_cache[link] else: html = get_data(link) self._page_cache[link] = html # test if cobj appears in page comb_names = [cobj['module_short'] + '.' + cobj['name']] if self.extra_modules_test is not None: for mod in self.extra_modules_test: comb_names.append(mod + '.' + cobj['name']) url = False for comb_name in comb_names: if html.find(comb_name) >= 0: url = link + '#' + comb_name link = url else: link = False return link def resolve(self, cobj, this_url): """Resolve the link to the documentation, returns None if not found Parameters ---------- cobj : dict Dict with information about the "code object" for which we are resolving a link. cobi['name'] : function or class name (str) cobj['module_short'] : shortened module name (str) cobj['module'] : module name (str) this_url: str URL of the current page. Needed to construct relative URLs (only used if relative=True in constructor). Returns ------- link : str | None The link (URL) to the documentation. """ full_name = cobj['module_short'] + '.' 
+ cobj['name'] link = self._link_cache.get(full_name, None) if link is None: # we don't have it cached link = self._get_link(cobj) # cache it for the future self._link_cache[full_name] = link if link is False or link is None: # failed to resolve return None if self.relative: link = os.path.relpath(link, start=this_url) if self._is_windows: # replace '\' with '/' so it on the web link = link.replace('\\', '/') # for some reason, the relative link goes one directory too high up link = link[3:] return link ############################################################################### rst_template = """ .. _example_%(short_fname)s: %(docstring)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- """ plot_rst_template = """ .. _example_%(short_fname)s: %(docstring)s %(image_list)s %(stdout)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- **Total running time of the example:** %(time_elapsed) .2f seconds """ # The following strings are used when we have several pictures: we use # an html div tag that our CSS uses to turn the lists into horizontal # lists. HLIST_HEADER = """ .. rst-class:: horizontal """ HLIST_IMAGE_TEMPLATE = """ * .. image:: images/%s :scale: 47 """ SINGLE_IMAGE = """ .. image:: images/%s :align: center """ def extract_docstring(filename): """ Extract a module-level docstring, if any """ lines = file(filename).readlines() start_row = 0 if lines[0].startswith('#!'): lines.pop(0) start_row = 1 docstring = '' first_par = '' tokens = tokenize.generate_tokens(iter(lines).next) for tok_type, tok_content, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif tok_type == 'STRING': docstring = eval(tok_content) # If the docstring is formatted with several paragraphs, extract # the first one: paragraphs = '\n'.join(line.rstrip() for line in docstring.split('\n')).split('\n\n') if len(paragraphs) > 0: first_par = paragraphs[0] break return docstring, first_par, erow + 1 + start_row def generate_example_rst(app): """ Generate the list of examples, as well as the contents of examples. """ root_dir = os.path.join(app.builder.srcdir, 'auto_examples') example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples') try: plot_gallery = eval(app.builder.config.plot_gallery) except TypeError: plot_gallery = bool(app.builder.config.plot_gallery) if not os.path.exists(example_dir): os.makedirs(example_dir) if not os.path.exists(root_dir): os.makedirs(root_dir) # we create an index.rst with all examples fhindex = file(os.path.join(root_dir, 'index.rst'), 'w') #Note: The sidebar button has been removed from the examples page for now # due to how it messes up the layout. Will be fixed at a later point fhindex.write("""\ .. raw:: html <style type="text/css"> div#sidebarbutton { display: none; } .figure { float: left; margin: 10px; width: auto; height: 200px; width: 180px; } .figure img { display: inline; } .figure .caption { width: 170px; text-align: center !important; } </style> Examples ======== .. _examples-index: """) # Here we don't use an os.walk, but we recurse only twice: flat is # better than nested. 
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery) for dir in sorted(os.listdir(example_dir)): if os.path.isdir(os.path.join(example_dir, dir)): generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery) fhindex.flush() def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery): """ Generate the rst file for an example directory. """ if not dir == '.': target_dir = os.path.join(root_dir, dir) src_dir = os.path.join(example_dir, dir) else: target_dir = root_dir src_dir = example_dir if not os.path.exists(os.path.join(src_dir, 'README.txt')): print 80 * '_' print ('Example directory %s does not have a README.txt file' % src_dir) print 'Skipping this directory' print 80 * '_' return fhindex.write(""" %s """ % file(os.path.join(src_dir, 'README.txt')).read()) if not os.path.exists(target_dir): os.makedirs(target_dir) def sort_key(a): # put last elements without a plot if not a.startswith('plot') and a.endswith('.py'): return 'zz' + a return a for fname in sorted(os.listdir(src_dir), key=sort_key): if fname.endswith('py'): generate_file_rst(fname, target_dir, src_dir, plot_gallery) thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png') link_name = os.path.join(dir, fname).replace(os.path.sep, '_') fhindex.write('.. figure:: %s\n' % thumb) if link_name.startswith('._'): link_name = link_name[2:] if dir != '.': fhindex.write(' :target: ./%s/%s.html\n\n' % (dir, fname[:-3])) else: fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3]) fhindex.write(""" :ref:`example_%s` .. toctree:: :hidden: %s/%s """ % (link_name, dir, fname[:-3])) fhindex.write(""" .. raw:: html <div style="clear: both"></div> """) # clear at the end of the section # modules for which we embed links into example code DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy'] def make_thumbnail(in_fname, out_fname, width, height): """Make a thumbnail with the same aspect ratio centered in an image with a given width and height """ img = Image.open(in_fname) width_in, height_in = img.size scale_w = width / float(width_in) scale_h = height / float(height_in) if height_in * scale_w <= height: scale = scale_w else: scale = scale_h width_sc = int(round(scale * width_in)) height_sc = int(round(scale * height_in)) # resize the image img.thumbnail((width_sc, height_sc), Image.ANTIALIAS) # insert centered thumb = Image.new('RGB', (width, height), (255, 255, 255)) pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2) thumb.paste(img, pos_insert) thumb.save(out_fname) def get_short_module_name(module_name, obj_name): """ Get the shortest possible module name """ parts = module_name.split('.') short_name = module_name for i in range(len(parts) - 1, 0, -1): short_name = '.'.join(parts[:i]) try: exec('from %s import %s' % (short_name, obj_name)) except ImportError: # get the last working module name short_name = '.'.join(parts[:(i + 1)]) break return short_name def generate_file_rst(fname, target_dir, src_dir, plot_gallery): """ Generate the rst file for a given example. """ base_image_name = os.path.splitext(fname)[0] image_fname = '%s_%%s.png' % base_image_name this_template = rst_template last_dir = os.path.split(src_dir)[-1] # to avoid leading . in file names, and wrong names in links if last_dir == '.' 
or last_dir == 'examples': last_dir = '' else: last_dir += '_' short_fname = last_dir + fname src_file = os.path.join(src_dir, fname) example_file = os.path.join(target_dir, fname) shutil.copyfile(src_file, example_file) # The following is a list containing all the figure names figure_list = [] image_dir = os.path.join(target_dir, 'images') thumb_dir = os.path.join(image_dir, 'thumb') if not os.path.exists(image_dir): os.makedirs(image_dir) if not os.path.exists(thumb_dir): os.makedirs(thumb_dir) image_path = os.path.join(image_dir, image_fname) stdout_path = os.path.join(image_dir, 'stdout_%s.txt' % base_image_name) time_path = os.path.join(image_dir, 'time_%s.txt' % base_image_name) thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png') time_elapsed = 0 if plot_gallery and fname.startswith('plot'): # generate the plot as png image if file name # starts with plot and if it is more recent than an # existing image. first_image_file = image_path % 1 if os.path.exists(stdout_path): stdout = open(stdout_path).read() else: stdout = '' if os.path.exists(time_path): time_elapsed = float(open(time_path).read()) if (not os.path.exists(first_image_file) or os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime): # We need to execute the code print 'plotting %s' % fname t0 = time() import matplotlib.pyplot as plt plt.close('all') cwd = os.getcwd() try: # First CD in the original example dir, so that any file # created by the example get created in this directory orig_stdout = sys.stdout os.chdir(os.path.dirname(src_file)) my_buffer = StringIO() my_stdout = Tee(sys.stdout, my_buffer) sys.stdout = my_stdout my_globals = {'pl': plt} execfile(os.path.basename(src_file), my_globals) time_elapsed = time() - t0 sys.stdout = orig_stdout my_stdout = my_buffer.getvalue() # get variables so we can later add links to the documentation example_code_obj = {} for var_name, var in my_globals.iteritems(): if not hasattr(var, '__module__'): continue if not isinstance(var.__module__, basestring): continue if var.__module__.split('.')[0] not in DOCMODULES: continue # get the type as a string with other things stripped tstr = str(type(var)) tstr = (tstr[tstr.find('\'') + 1:tstr.rfind('\'')].split('.')[-1]) # get shortened module name module_short = get_short_module_name(var.__module__, tstr) cobj = {'name': tstr, 'module': var.__module__, 'module_short': module_short, 'obj_type': 'object'} example_code_obj[var_name] = cobj # find functions so we can later add links to the documentation funregex = re.compile('[\w.]+\(') with open(src_file, 'rt') as fid: for line in fid.readlines(): if line.startswith('#'): continue for match in funregex.findall(line): fun_name = match[:-1] try: exec('this_fun = %s' % fun_name, my_globals) except Exception as err: print 'extracting function failed' print err continue this_fun = my_globals['this_fun'] if not callable(this_fun): continue if not hasattr(this_fun, '__module__'): continue if not isinstance(this_fun.__module__, basestring): continue if (this_fun.__module__.split('.')[0] not in DOCMODULES): continue # get shortened module name fun_name_short = fun_name.split('.')[-1] module_short = get_short_module_name( this_fun.__module__, fun_name_short) cobj = {'name': fun_name_short, 'module': this_fun.__module__, 'module_short': module_short, 'obj_type': 'function'} example_code_obj[fun_name] = cobj fid.close() if len(example_code_obj) > 0: # save the dictionary, so we can later add hyperlinks codeobj_fname = example_file[:-3] + '_codeobj.pickle' with open(codeobj_fname, 'wb') 
as fid: cPickle.dump(example_code_obj, fid, cPickle.HIGHEST_PROTOCOL) fid.close() if '__doc__' in my_globals: # The __doc__ is often printed in the example, we # don't with to echo it my_stdout = my_stdout.replace( my_globals['__doc__'], '') my_stdout = my_stdout.strip() if my_stdout: stdout = '**Script output**::\n\n %s\n\n' % ( '\n '.join(my_stdout.split('\n'))) open(stdout_path, 'w').write(stdout) open(time_path, 'w').write('%f' % time_elapsed) os.chdir(cwd) # In order to save every figure we have two solutions : # * iterate from 1 to infinity and call plt.fignum_exists(n) # (this requires the figures to be numbered # incrementally: 1, 2, 3 and not 1, 2, 5) # * iterate over [fig_mngr.num for fig_mngr in # matplotlib._pylab_helpers.Gcf.get_all_fig_managers()] for fig_num in (fig_mngr.num for fig_mngr in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()): # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. plt.figure(fig_num) plt.savefig(image_path % fig_num) figure_list.append(image_fname % fig_num) except: print 80 * '_' print '%s is not compiling:' % fname traceback.print_exc() print 80 * '_' finally: os.chdir(cwd) sys.stdout = orig_stdout print " - time elapsed : %.2g sec" % time_elapsed else: figure_list = [f[len(image_dir):] for f in glob.glob(image_path % '[1-9]')] #for f in glob.glob(image_path % '*')] # generate thumb file this_template = plot_rst_template if os.path.exists(first_image_file): make_thumbnail(first_image_file, thumb_file, 180, 120) if not os.path.exists(thumb_file): # create something to replace the thumbnail make_thumbnail('images/no_image.png', thumb_file, 180, 120) docstring, short_desc, end_row = extract_docstring(example_file) # Depending on whether we have one or more figures, we're using a # horizontal list or a single rst call to 'image'. if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') else: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w') f.write(this_template % locals()) f.flush() def embed_code_links(app, exception): """Embed hyperlinks to documentation into example code""" try: if exception is not None: return print 'Embedding documentation hyperlinks in examples..' 
# Add resolvers for the packages for which we want to show links doc_resolvers = {} doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir, relative=True) doc_resolvers['matplotlib'] = SphinxDocLinkResolver( 'http://matplotlib.org') doc_resolvers['numpy'] = SphinxDocLinkResolver( 'http://docs.scipy.org/doc/numpy-1.6.0') doc_resolvers['scipy'] = SphinxDocLinkResolver( 'http://docs.scipy.org/doc/scipy-0.11.0/reference') example_dir = os.path.join(app.builder.srcdir, 'auto_examples') html_example_dir = os.path.abspath(os.path.join(app.builder.outdir, 'auto_examples')) # patterns for replacement link_pattern = '<a href="%s">%s</a>' orig_pattern = '<span class="n">%s</span>' period = '<span class="o">.</span>' for dirpath, _, filenames in os.walk(html_example_dir): for fname in filenames: print '\tprocessing: %s' % fname full_fname = os.path.join(html_example_dir, dirpath, fname) subpath = dirpath[len(html_example_dir) + 1:] pickle_fname = os.path.join(example_dir, subpath, fname[:-5] + '_codeobj.pickle') if os.path.exists(pickle_fname): # we have a pickle file with the objects to embed links for with open(pickle_fname, 'rb') as fid: example_code_obj = cPickle.load(fid) fid.close() str_repl = {} # generate replacement strings with the links for name, cobj in example_code_obj.iteritems(): this_module = cobj['module'].split('.')[0] if this_module not in doc_resolvers: continue link = doc_resolvers[this_module].resolve(cobj, full_fname) if link is not None: parts = name.split('.') name_html = orig_pattern % parts[0] for part in parts[1:]: name_html += period + orig_pattern % part str_repl[name_html] = link_pattern % (link, name_html) # do the replacement in the html file if len(str_repl) > 0: with open(full_fname, 'rt') as fid: lines_in = fid.readlines() fid.close() with open(full_fname, 'wt') as fid: for line in lines_in: for name, link in str_repl.iteritems(): line = line.replace(name, link) fid.write(line) fid.close() except urllib2.HTTPError, e: print ("The following HTTP Error has occurred:\n") print e.code except urllib2.URLError, e: print ("\n...\n" "Warning: Embedding the documentation hyperlinks requires " "internet access.\nPlease check your network connection.\n" "Unable to continue embedding due to a URL Error: \n") print e.args print '[done]' def setup(app): app.connect('builder-inited', generate_example_rst) app.add_config_value('plot_gallery', True, 'html') # embed links after build is finished app.connect('build-finished', embed_code_links) # Sphinx hack: sphinx copies generated images to the build directory # each time the docs are made. If the desired image name already # exists, it appends a digit to prevent overwrites. The problem is, # the directory is never cleared. This means that each time you build # the docs, the number of images in the directory grows. # # This question has been asked on the sphinx development list, but there # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html # # The following is a hack that prevents this behavior by clearing the # image build directory each time the docs are built. If sphinx # changes their layout between versions, this will not work (though # it should probably not cause a crash). Tested successfully # on Sphinx 1.0.7 build_image_dir = '_build/html/_images' if os.path.exists(build_image_dir): filelist = os.listdir(build_image_dir) for filename in filelist: if filename.endswith('png'): os.remove(os.path.join(build_image_dir, filename))
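A sketch of how a Sphinx project would typically enable this extension; the directory name 'sphinxext' and the conf.py fragment are assumptions, not taken from this file. setup(app) above registers the 'builder-inited' and 'build-finished' hooks plus the plot_gallery config value that this fragment sets.

# conf.py (hypothetical fragment)
import os
import sys
sys.path.insert(0, os.path.abspath('sphinxext'))   # folder containing gen_rst.py

extensions = ['gen_rst']   # Sphinx imports the module and calls gen_rst.setup(app)
plot_gallery = True        # execute plot_*.py examples and embed their figures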
bsd-3-clause
boomsbloom/dtm-fmri
DTM/for_gensim/lib/python2.7/site-packages/pandas/sparse/frame.py
7
30372
""" Data structures for sparse float data. Life is made simpler by dealing only with float64 data """ from __future__ import division # pylint: disable=E1101,E1103,W0231,E0202 from numpy import nan from pandas.compat import lmap from pandas import compat import numpy as np from pandas.types.missing import isnull, notnull from pandas.types.cast import _maybe_upcast from pandas.types.common import _ensure_platform_int from pandas.core.common import _try_sort from pandas.compat.numpy import function as nv from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.series import Series from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray, _default_index) import pandas.core.algorithms as algos from pandas.core.internals import (BlockManager, create_block_manager_from_arrays) import pandas.core.generic as generic from pandas.sparse.series import SparseSeries, SparseArray from pandas.util.decorators import Appender import pandas.core.ops as ops _shared_doc_kwargs = dict(klass='SparseDataFrame') class SparseDataFrame(DataFrame): """ DataFrame containing sparse floating point data in the form of SparseSeries objects Parameters ---------- data : same types as can be passed to DataFrame index : array-like, optional column : array-like, optional default_kind : {'block', 'integer'}, default 'block' Default sparse kind for converting Series to SparseSeries. Will not override SparseSeries passed into constructor default_fill_value : float Default fill_value for converting Series to SparseSeries. Will not override SparseSeries passed in """ _constructor_sliced = SparseSeries _subtyp = 'sparse_frame' def __init__(self, data=None, index=None, columns=None, default_kind=None, default_fill_value=None, dtype=None, copy=False): # pick up the defaults from the Sparse structures if isinstance(data, SparseDataFrame): if index is None: index = data.index if columns is None: columns = data.columns if default_fill_value is None: default_fill_value = data.default_fill_value if default_kind is None: default_kind = data.default_kind elif isinstance(data, (SparseSeries, SparseArray)): if index is None: index = data.index if default_fill_value is None: default_fill_value = data.fill_value if columns is None and hasattr(data, 'name'): columns = [data.name] if columns is None: raise Exception("cannot pass a series w/o a name or columns") data = {columns[0]: data} if default_fill_value is None: default_fill_value = np.nan if default_kind is None: default_kind = 'block' self._default_kind = default_kind self._default_fill_value = default_fill_value if isinstance(data, dict): mgr = self._init_dict(data, index, columns) if dtype is not None: mgr = mgr.astype(dtype) elif isinstance(data, (np.ndarray, list)): mgr = self._init_matrix(data, index, columns) if dtype is not None: mgr = mgr.astype(dtype) elif isinstance(data, SparseDataFrame): mgr = self._init_mgr(data._data, dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, DataFrame): mgr = self._init_dict(data, data.index, data.columns) if dtype is not None: mgr = mgr.astype(dtype) elif isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif data is None: data = DataFrame() if index is None: index = Index([]) else: index = _ensure_index(index) if columns is None: columns = Index([]) else: for c in columns: data[c] = SparseArray(np.nan, index=index, kind=self._default_kind, fill_value=self._default_fill_value) mgr = to_manager(data, columns, 
index) if dtype is not None: mgr = mgr.astype(dtype) generic.NDFrame.__init__(self, mgr) @property def _constructor(self): return SparseDataFrame _constructor_sliced = SparseSeries def _init_dict(self, data, index, columns, dtype=None): # pre-filter out columns if we passed it if columns is not None: columns = _ensure_index(columns) data = dict((k, v) for k, v in compat.iteritems(data) if k in columns) else: columns = Index(_try_sort(list(data.keys()))) if index is None: index = extract_index(list(data.values())) sp_maker = lambda x: SparseArray(x, kind=self._default_kind, fill_value=self._default_fill_value, copy=True) sdict = DataFrame() for k, v in compat.iteritems(data): if isinstance(v, Series): # Force alignment, no copy necessary if not v.index.equals(index): v = v.reindex(index) if not isinstance(v, SparseSeries): v = sp_maker(v.values) elif isinstance(v, SparseArray): v = v.copy() else: if isinstance(v, dict): v = [v.get(i, nan) for i in index] v = sp_maker(v) sdict[k] = v # TODO: figure out how to handle this case, all nan's? # add in any other columns we want to have (completeness) nan_vec = np.empty(len(index)) nan_vec.fill(nan) for c in columns: if c not in sdict: sdict[c] = sp_maker(nan_vec) return to_manager(sdict, columns, index) def _init_matrix(self, data, index, columns, dtype=None): data = _prep_ndarray(data, copy=False) N, K = data.shape if index is None: index = _default_index(N) if columns is None: columns = _default_index(K) if len(columns) != K: raise ValueError('Column length mismatch: %d vs. %d' % (len(columns), K)) if len(index) != N: raise ValueError('Index length mismatch: %d vs. %d' % (len(index), N)) data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)]) return self._init_dict(data, index, columns, dtype) def __array_wrap__(self, result): return self._constructor( result, index=self.index, columns=self.columns, default_kind=self._default_kind, default_fill_value=self._default_fill_value).__finalize__(self) def __getstate__(self): # pickling return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data, _default_fill_value=self._default_fill_value, _default_kind=self._default_kind) def _unpickle_sparse_frame_compat(self, state): """ original pickle format """ series, cols, idx, fv, kind = state if not isinstance(cols, Index): # pragma: no cover from pandas.io.pickle import _unpickle_array columns = _unpickle_array(cols) else: columns = cols if not isinstance(idx, Index): # pragma: no cover from pandas.io.pickle import _unpickle_array index = _unpickle_array(idx) else: index = idx series_dict = DataFrame() for col, (sp_index, sp_values) in compat.iteritems(series): series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index, fill_value=fv) self._data = to_manager(series_dict, columns, index) self._default_fill_value = fv self._default_kind = kind def to_dense(self): """ Convert to dense DataFrame Returns ------- df : DataFrame """ data = dict((k, v.to_dense()) for k, v in compat.iteritems(self)) return DataFrame(data, index=self.index, columns=self.columns) def _apply_columns(self, func): """ get new SparseDataFrame applying func to each columns """ new_data = {} for col, series in compat.iteritems(self): new_data[col] = func(series) return self._constructor( data=new_data, index=self.index, columns=self.columns, default_fill_value=self.default_fill_value).__finalize__(self) def astype(self, dtype): return self._apply_columns(lambda x: x.astype(dtype)) def copy(self, deep=True): """ Make a copy of this SparseDataFrame """ result = 
super(SparseDataFrame, self).copy(deep=deep) result._default_fill_value = self._default_fill_value result._default_kind = self._default_kind return result @property def default_fill_value(self): return self._default_fill_value @property def default_kind(self): return self._default_kind @property def density(self): """ Ratio of non-sparse points to total (dense) data points represented in the frame """ tot_nonsparse = sum([ser.sp_index.npoints for _, ser in compat.iteritems(self)]) tot = len(self.index) * len(self.columns) return tot_nonsparse / float(tot) def fillna(self, value=None, method=None, axis=0, inplace=False, limit=None, downcast=None): new_self = super(SparseDataFrame, self).fillna(value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast) if not inplace: self = new_self # set the fill value if we are filling as a scalar with nothing special # going on if (value is not None and value == value and method is None and limit is None): self._default_fill_value = value if not inplace: return self # ---------------------------------------------------------------------- # Support different internal representation of SparseDataFrame def _sanitize_column(self, key, value, **kwargs): """ Creates a new SparseArray from the input value. Parameters ---------- key : object value : scalar, Series, or array-like kwargs : dict Returns ------- sanitized_column : SparseArray """ sp_maker = lambda x, index=None: SparseArray( x, index=index, fill_value=self._default_fill_value, kind=self._default_kind) if isinstance(value, SparseSeries): clean = value.reindex(self.index).as_sparse_array( fill_value=self._default_fill_value, kind=self._default_kind) elif isinstance(value, SparseArray): if len(value) != len(self.index): raise AssertionError('Length of values does not match ' 'length of index') clean = value elif hasattr(value, '__iter__'): if isinstance(value, Series): clean = value.reindex(self.index) if not isinstance(value, SparseSeries): clean = sp_maker(clean) else: if len(value) != len(self.index): raise AssertionError('Length of values does not match ' 'length of index') clean = sp_maker(value) # Scalar else: clean = sp_maker(value, self.index) # always return a SparseArray! return clean def __getitem__(self, key): """ Retrieve column or slice from DataFrame """ if isinstance(key, slice): date_rng = self.index[key] return self.reindex(date_rng) elif isinstance(key, (np.ndarray, list, Series)): return self._getitem_array(key) else: return self._get_item_cache(key) @Appender(DataFrame.get_value.__doc__, indents=0) def get_value(self, index, col, takeable=False): if takeable is True: series = self._iget_item_cache(col) else: series = self._get_item_cache(col) return series.get_value(index, takeable=takeable) def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index Parameters ---------- index : row label col : column label value : scalar value takeable : interpret the index/col as indexers, default False Notes ----- This method *always* returns a new object. 
It is currently not particularly efficient (and potentially very expensive) but is provided for API compatibility with DataFrame Returns ------- frame : DataFrame """ dense = self.to_dense().set_value(index, col, value, takeable=takeable) return dense.to_sparse(kind=self._default_kind, fill_value=self._default_fill_value) def _slice(self, slobj, axis=0, kind=None): if axis == 0: new_index = self.index[slobj] new_columns = self.columns else: new_index = self.index new_columns = self.columns[slobj] return self.reindex(index=new_index, columns=new_columns) def xs(self, key, axis=0, copy=False): """ Returns a row (cross-section) from the SparseDataFrame as a Series object. Parameters ---------- key : some index contained in the index Returns ------- xs : Series """ if axis == 1: data = self[key] return data i = self.index.get_loc(key) data = self.take([i]).get_values()[0] return Series(data, index=self.columns) # ---------------------------------------------------------------------- # Arithmetic-related methods def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns if level is not None: raise NotImplementedError("'level' argument is not supported") if self.empty and other.empty: return self._constructor(index=new_index).__finalize__(self) new_data = {} new_fill_value = None if fill_value is not None: # TODO: be a bit more intelligent here for col in new_columns: if col in this and col in other: dleft = this[col].to_dense() dright = other[col].to_dense() result = dleft._binop(dright, func, fill_value=fill_value) result = result.to_sparse(fill_value=this[col].fill_value) new_data[col] = result else: for col in new_columns: if col in this and col in other: new_data[col] = func(this[col], other[col]) # if the fill values are the same use them? 
or use a valid one other_fill_value = getattr(other, 'default_fill_value', np.nan) if self.default_fill_value == other_fill_value: new_fill_value = self.default_fill_value elif np.isnan(self.default_fill_value) and not np.isnan( other_fill_value): new_fill_value = other_fill_value elif not np.isnan(self.default_fill_value) and np.isnan( other_fill_value): new_fill_value = self.default_fill_value return self._constructor(data=new_data, index=new_index, columns=new_columns, default_fill_value=new_fill_value ).__finalize__(self) def _combine_match_index(self, other, func, level=None, fill_value=None): new_data = {} if fill_value is not None: raise NotImplementedError("'fill_value' argument is not supported") if level is not None: raise NotImplementedError("'level' argument is not supported") new_index = self.index.union(other.index) this = self if self.index is not new_index: this = self.reindex(new_index) if other.index is not new_index: other = other.reindex(new_index) for col, series in compat.iteritems(this): new_data[col] = func(series.values, other.values) # fill_value is a function of our operator if isnull(other.fill_value) or isnull(self.default_fill_value): fill_value = np.nan else: fill_value = func(np.float64(self.default_fill_value), np.float64(other.fill_value)) return self._constructor( new_data, index=new_index, columns=self.columns, default_fill_value=fill_value).__finalize__(self) def _combine_match_columns(self, other, func, level=None, fill_value=None): # patched version of DataFrame._combine_match_columns to account for # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series, # where 3.0 is numpy.float64 and series is a SparseSeries. Still # possible for this to happen, which is bothersome if fill_value is not None: raise NotImplementedError("'fill_value' argument is not supported") if level is not None: raise NotImplementedError("'level' argument is not supported") new_data = {} union = intersection = self.columns if not union.equals(other.index): union = other.index.union(self.columns) intersection = other.index.intersection(self.columns) for col in intersection: new_data[col] = func(self[col], float(other[col])) return self._constructor( new_data, index=self.index, columns=union, default_fill_value=self.default_fill_value).__finalize__(self) def _combine_const(self, other, func, raise_on_error=True): return self._apply_columns(lambda x: func(x, other)) def _reindex_index(self, index, method, copy, level, fill_value=np.nan, limit=None, takeable=False): if level is not None: raise TypeError('Reindex by level not supported for sparse') if self.index.equals(index): if copy: return self.copy() else: return self if len(self.index) == 0: return self._constructor( index=index, columns=self.columns).__finalize__(self) indexer = self.index.get_indexer(index, method, limit=limit) indexer = _ensure_platform_int(indexer) mask = indexer == -1 need_mask = mask.any() new_series = {} for col, series in self.iteritems(): if mask.all(): continue values = series.values # .take returns SparseArray new = values.take(indexer) if need_mask: new = new.values # convert integer to float if necessary. 
need to do a lot # more than that, handle boolean etc also new, fill_value = _maybe_upcast(new, fill_value=fill_value) np.putmask(new, mask, fill_value) new_series[col] = new return self._constructor( new_series, index=index, columns=self.columns, default_fill_value=self._default_fill_value).__finalize__(self) def _reindex_columns(self, columns, copy, level, fill_value, limit=None, takeable=False): if level is not None: raise TypeError('Reindex by level not supported for sparse') if notnull(fill_value): raise NotImplementedError("'fill_value' argument is not supported") if limit: raise NotImplementedError("'limit' argument is not supported") # TODO: fill value handling sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns) return self._constructor( sdict, index=self.index, columns=columns, default_fill_value=self._default_fill_value).__finalize__(self) def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, limit=None, copy=False, allow_dups=False): if method is not None or limit is not None: raise NotImplementedError("cannot reindex with a method or limit " "with sparse") if fill_value is None: fill_value = np.nan index, row_indexer = reindexers.get(0, (None, None)) columns, col_indexer = reindexers.get(1, (None, None)) if columns is None: columns = self.columns new_arrays = {} for col in columns: if col not in self: continue if row_indexer is not None: new_arrays[col] = algos.take_1d(self[col].get_values(), row_indexer, fill_value=fill_value) else: new_arrays[col] = self[col] return self._constructor(new_arrays, index=index, columns=columns).__finalize__(self) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): if on is not None: raise NotImplementedError("'on' keyword parameter is not yet " "implemented") return self._join_index(other, how, lsuffix, rsuffix) def _join_index(self, other, how, lsuffix, rsuffix): if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = SparseDataFrame( {other.name: other}, default_fill_value=self._default_fill_value) join_index = self.index.join(other.index, how=how) this = self.reindex(join_index) other = other.reindex(join_index) this, other = this._maybe_rename_join(other, lsuffix, rsuffix) from pandas import concat return concat([this, other], axis=1, verify_integrity=True) def _maybe_rename_join(self, other, lsuffix, rsuffix): to_rename = self.columns.intersection(other.columns) if len(to_rename) > 0: if not lsuffix and not rsuffix: raise ValueError('columns overlap but no suffix specified: %s' % to_rename) def lrenamer(x): if x in to_rename: return '%s%s' % (x, lsuffix) return x def rrenamer(x): if x in to_rename: return '%s%s' % (x, rsuffix) return x this = self.rename(columns=lrenamer) other = other.rename(columns=rrenamer) else: this = self return this, other def transpose(self, *args, **kwargs): """ Returns a DataFrame with the rows/columns switched. """ nv.validate_transpose(args, kwargs) return self._constructor( self.values.T, index=self.columns, columns=self.index, default_fill_value=self._default_fill_value, default_kind=self._default_kind).__finalize__(self) T = property(transpose) @Appender(DataFrame.count.__doc__) def count(self, axis=0, **kwds): if axis is None: axis = self._stat_axis_number return self.apply(lambda x: x.count(), axis=axis) def cumsum(self, axis=0, *args, **kwargs): """ Return SparseDataFrame of cumulative sums over requested axis. 
Parameters ---------- axis : {0, 1} 0 for row-wise, 1 for column-wise Returns ------- y : SparseDataFrame """ nv.validate_cumsum(args, kwargs) if axis is None: axis = self._stat_axis_number return self.apply(lambda x: x.cumsum(), axis=axis) @Appender(generic._shared_docs['isnull']) def isnull(self): return self._apply_columns(lambda x: x.isnull()) @Appender(generic._shared_docs['isnotnull']) def isnotnull(self): return self._apply_columns(lambda x: x.isnotnull()) def apply(self, func, axis=0, broadcast=False, reduce=False): """ Analogous to DataFrame.apply, for SparseDataFrame Parameters ---------- func : function Function to apply to each column axis : {0, 1, 'index', 'columns'} broadcast : bool, default False For aggregation functions, return object of same size with values propagated Returns ------- applied : Series or SparseDataFrame """ if not len(self.columns): return self axis = self._get_axis_number(axis) if isinstance(func, np.ufunc): new_series = {} for k, v in compat.iteritems(self): applied = func(v) applied.fill_value = func(v.fill_value) new_series[k] = applied return self._constructor( new_series, index=self.index, columns=self.columns, default_fill_value=self._default_fill_value, default_kind=self._default_kind).__finalize__(self) else: if not broadcast: return self._apply_standard(func, axis, reduce=reduce) else: return self._apply_broadcast(func, axis) def applymap(self, func): """ Apply a function to a DataFrame that is intended to operate elementwise, i.e. like doing map(func, series) for each series in the DataFrame Parameters ---------- func : function Python function, returns a single value from a single value Returns ------- applied : DataFrame """ return self.apply(lambda x: lmap(func, x)) def to_manager(sdf, columns, index): """ create and return the block manager from a dataframe of series, columns, index """ # from BlockManager perspective axes = [_ensure_index(columns), _ensure_index(index)] return create_block_manager_from_arrays( [sdf[c] for c in columns], columns, axes) def stack_sparse_frame(frame): """ Only makes sense when fill_value is NaN """ lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)] nobs = sum(lengths) # this is pretty fast minor_labels = np.repeat(np.arange(len(frame.columns)), lengths) inds_to_concat = [] vals_to_concat = [] # TODO: Figure out whether this can be reached. # I think this currently can't be reached because you can't build a # SparseDataFrame with a non-np.NaN fill value (fails earlier). for _, series in compat.iteritems(frame): if not np.isnan(series.fill_value): raise TypeError('This routine assumes NaN fill value') int_index = series.sp_index.to_int_index() inds_to_concat.append(int_index.indices) vals_to_concat.append(series.sp_values) major_labels = np.concatenate(inds_to_concat) stacked_values = np.concatenate(vals_to_concat) index = MultiIndex(levels=[frame.index, frame.columns], labels=[major_labels, minor_labels], verify_integrity=False) lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index, columns=['foo']) return lp.sortlevel(level=0) def homogenize(series_dict): """ Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex corresponding to the locations where they all have data Parameters ---------- series_dict : dict or DataFrame Notes ----- Using the dumbest algorithm I could think of. 
Should put some more thought into this Returns ------- homogenized : dict of SparseSeries """ index = None need_reindex = False for _, series in compat.iteritems(series_dict): if not np.isnan(series.fill_value): raise TypeError('this method is only valid with NaN fill values') if index is None: index = series.sp_index elif not series.sp_index.equals(index): need_reindex = True index = index.intersect(series.sp_index) if need_reindex: output = {} for name, series in compat.iteritems(series_dict): if not series.sp_index.equals(index): series = series.sparse_reindex(index) output[name] = series else: output = series_dict return output # use unaccelerated ops for sparse objects ops.add_flex_arithmetic_methods(SparseDataFrame, use_numexpr=False, **ops.frame_flex_funcs) ops.add_special_arithmetic_methods(SparseDataFrame, use_numexpr=False, **ops.frame_special_funcs)
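
# --- Editor's sketch (not part of pandas) ------------------------------------
# The fill-value resolution used in SparseDataFrame._combine_frame above can be
# summarised as a small standalone rule: keep the fill value when both frames
# agree on it, otherwise fall back to whichever side has a non-NaN default.
# The helper name ``_resolve_fill_value`` is hypothetical and only illustrative.
def _resolve_fill_value(left_fill, right_fill):
    """Mirror the fill-value rule of SparseDataFrame._combine_frame."""
    if left_fill == right_fill:
        return left_fill
    if np.isnan(left_fill) and not np.isnan(right_fill):
        return right_fill
    if not np.isnan(left_fill) and np.isnan(right_fill):
        return left_fill
    return None  # no common fill value; _combine_frame leaves it as None

# e.g. _resolve_fill_value(np.nan, 0.0) -> 0.0, matching the branches above.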
mit
dingliumath/quant-econ
examples/illustrates_lln.py
7
1802
""" Filename: illustrates_lln.py Authors: John Stachurski and Thomas J. Sargent Visual illustration of the law of large numbers. """ import random import numpy as np from scipy.stats import t, beta, lognorm, expon, gamma, poisson import matplotlib.pyplot as plt n = 100 # == Arbitrary collection of distributions == # distributions = {"student's t with 10 degrees of freedom": t(10), "beta(2, 2)": beta(2, 2), "lognormal LN(0, 1/2)": lognorm(0.5), "gamma(5, 1/2)": gamma(5, scale=2), "poisson(4)": poisson(4), "exponential with lambda = 1": expon(1)} # == Create a figure and some axes == # num_plots = 3 fig, axes = plt.subplots(num_plots, 1, figsize=(10, 10)) # == Set some plotting parameters to improve layout == # bbox = (0., 1.02, 1., .102) legend_args = {'ncol': 2, 'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} plt.subplots_adjust(hspace=0.5) for ax in axes: # == Choose a randomly selected distribution == # name = random.choice(list(distributions.keys())) distribution = distributions.pop(name) # == Generate n draws from the distribution == # data = distribution.rvs(n) # == Compute sample mean at each n == # sample_mean = np.empty(n) for i in range(n): sample_mean[i] = np.mean(data[:i+1]) # == Plot == # ax.plot(list(range(n)), data, 'o', color='grey', alpha=0.5) axlabel = r'$\bar X_n$' + ' for ' + r'$X_i \sim$' + ' ' + name ax.plot(list(range(n)), sample_mean, 'g-', lw=3, alpha=0.6, label=axlabel) m = distribution.mean() ax.plot(list(range(n)), [m] * n, 'k--', lw=1.5, label=r'$\mu$') ax.vlines(list(range(n)), m, data, lw=0.2) ax.legend(**legend_args) plt.show()
bsd-3-clause
rcrowder/nupic
src/nupic/algorithms/monitor_mixin/monitor_mixin_base.py
10
7351
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ MonitorMixinBase class used in monitor mixin framework. Using a monitor mixin with your algorithm ----------------------------------------- 1. Create a subclass of your algorithm class, with the first parent being the corresponding Monitor class. For example, class MonitoredTemporalMemory(TemporalMemoryMonitorMixin, TemporalMemory): pass 2. Create an instance of the monitored class and use that. instance = MonitoredTemporalMemory() # Run data through instance 3. Now you can call the following methods to print monitored data from of your instance: - instance.mmPrettyPrintMetrics(instance.mmGetDefaultMetrics()) - instance.mmPrettyPrintTraces(instance.mmGetDefaultTraces()) Each specific monitor also has specific methods you can call to extract data out of it. Adding data to a monitor mixin ----------------------------------------- 1. Create a variable for the data you want to capture in your specific monitor's `mmClearHistory` method. For example, self._mmTraces["predictedCells"] = IndicesTrace(self, "predicted cells") Make sure you use the correct type of trace for your data. 2. Add data to this trace in your algorithm's `compute` method (or anywhere else). self._mmTraces["predictedCells"].data.append(set(self.getPredictiveCells())) 3. You can optionally add this trace as a default trace in `mmGetDefaultTraces`, or define a function to return that trace: def mmGetTracePredictiveCells(self): Any trace can be converted to a metric using the utility functions provided in the framework (see `metric.py`). Extending the functionality of the monitor mixin framework ----------------------------------------- If you want to add new types of traces and metrics, add them to `trace.py` and `metric.py`. You can also create new monitors by simply defining new classes that inherit from MonitorMixinBase. """ import abc import numpy from prettytable import PrettyTable from nupic.algorithms.monitor_mixin.plot import Plot class MonitorMixinBase(object): """ Base class for MonitorMixin. Each subclass will be a mixin for a particular algorithm. All arguments, variables, and methods in monitor mixin classes should be prefixed with "mm" (to avoid collision with the classes they mix in to). """ __metaclass__ = abc.ABCMeta def __init__(self, *args, **kwargs): """ Note: If you set the kwarg "mmName", then pretty-printing of traces and metrics will include the name you specify as a tag before every title. 
""" self.mmName = kwargs.get("mmName") if "mmName" in kwargs: del kwargs["mmName"] super(MonitorMixinBase, self).__init__(*args, **kwargs) # Mapping from key (string) => trace (Trace) self._mmTraces = None self._mmData = None self.mmClearHistory() def mmClearHistory(self): """ Clears the stored history. """ self._mmTraces = {} self._mmData = {} @staticmethod def mmPrettyPrintTraces(traces, breakOnResets=None): """ Returns pretty-printed table of traces. @param traces (list) Traces to print in table @param breakOnResets (BoolsTrace) Trace of resets to break table on @return (string) Pretty-printed table of traces. """ assert len(traces) > 0, "No traces found" table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces]) for i in xrange(len(traces[0].data)): if breakOnResets and breakOnResets.data[i]: table.add_row(["<reset>"] * (len(traces) + 1)) table.add_row([i] + [trace.prettyPrintDatum(trace.data[i]) for trace in traces]) return table.get_string().encode("utf-8") @staticmethod def mmPrettyPrintMetrics(metrics, sigFigs=5): """ Returns pretty-printed table of metrics. @param metrics (list) Traces to print in table @param sigFigs (int) Number of significant figures to print @return (string) Pretty-printed table of metrics. """ assert len(metrics) > 0, "No metrics found" table = PrettyTable(["Metric", "mean", "standard deviation", "min", "max", "sum", ]) for metric in metrics: table.add_row([metric.prettyPrintTitle()] + metric.getStats()) return table.get_string().encode("utf-8") def mmGetDefaultTraces(self, verbosity=1): """ Returns list of default traces. (To be overridden.) @param verbosity (int) Verbosity level @return (list) Default traces """ return [] def mmGetDefaultMetrics(self, verbosity=1): """ Returns list of default metrics. (To be overridden.) @param verbosity (int) Verbosity level @return (list) Default metrics """ return [] def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="", showReset=False, resetShading=0.25): """ Returns plot of the cell activity. Note that if many timesteps of activities are input, matplotlib's image interpolation may omit activities (columns in the image). @param cellTrace (list) a temporally ordered list of sets of cell activities @param cellCount (int) number of cells in the space being rendered @param activityType (string) type of cell activity being displayed @param title (string) an optional title for the figure @param showReset (bool) if true, the first set of cell activities after a reset will have a grayscale background @param resetShading (float) applicable if showReset is true, specifies the intensity of the reset background with 0.0 being white and 1.0 being black @return (Plot) plot """ plot = Plot(self, title) resetTrace = self.mmGetTraceResets().data data = numpy.zeros((cellCount, 1)) for i in xrange(len(cellTrace)): # Set up a "background" vector that is shaded or blank if showReset and resetTrace[i]: activity = numpy.ones((cellCount, 1)) * resetShading else: activity = numpy.zeros((cellCount, 1)) activeIndices = cellTrace[i] activity[list(activeIndices)] = 1 data = numpy.concatenate((data, activity), 1) plot.add2DArray(data, xlabel="Time", ylabel=activityType, name=title) return plot
agpl-3.0
jjo31/ATHAM-Fluidity
examples/backward_facing_step_2d/postprocessor_2d.py
4
12563
#!/usr/bin/env python import glob import sys import os import vtktools import numpy import pylab import re from matplotlib import rc def get_filelist(sample, start): def key(s): return int(s.split('_')[-1].split('.')[0]) list = glob.glob("*vtu") list = [l for l in list if 'check' not in l] vtu_nos = [float(s.split('_')[-1].split('.')[0]) for s in list] vals = zip(vtu_nos, list) vals.sort() unzip = lambda l:tuple(apply(zip,l)) vtu_nos, list = unzip(vals) shortlist = [] for file in list: try: os.stat(file) except: f_log.write("No such file: %s" % files) sys.exit(1) ##### Start at the (start+1)th file. ##### Add every nth file by taking integer multiples of n. vtu_no = float(file.split('_')[-1].split('.')[0]) if vtu_no > start: if (vtu_no%sample==0): shortlist.append(file) ##### Append final file if a large number of files remain. elif vtu_no==len(vtu_nos)-1 and (max(vtu_nos)-sample/4.0)>vtu_no: shortlist.append(file) return shortlist #### taken from http://www.codinghorror.com/blog/archives/001018.html ####### def tryint(s): try: return int(s) except: return s def alphanum_key(s): """ Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"] """ return [ tryint(c) for c in re.split('([0-9]+)', s) ] def sort_nicely(l): """ Sort the given list in the way that humans expect. """ l.sort(key=alphanum_key) ############################################################################## # There are shorter and more elegant version of the above, but this works # on CX1, where this test might be run... ################################################################### # Reattachment length: def reattachment_length(filelist): print "Calculating reattachment point locations using change of x-velocity sign\n" nums=[]; results=[]; files = [] ##### check for no files if (len(filelist) == 0): print "No files!" sys.exit(1) for file in filelist: try: os.stat(file) except: print "No such file: %s" % file sys.exit(1) files.append(file) sort_nicely(files) for file in files: ##### Read in data from vtu datafile = vtktools.vtu(file) ##### Get time for plot: t = min(datafile.GetScalarField("Time")) print file, ', elapsed time = ', t ##### points near bottom surface, 0 < x < 20 pts=[]; no_pts = 82; offset = 0.01 x = 5.0 for i in range(1, no_pts): pts.append((x, offset, 0.0)) x += 0.25 pts = numpy.array(pts) ##### Get x-velocity on bottom boundary uvw = datafile.ProbeData(pts, "AverageVelocity") u = [] u = uvw[:,0] points = 0.0 for i in range(len(u)-1): ##### Hack to ignore division by zero entries in u. ##### All u should be nonzero away from boundary! if((u[i] / u[i+1]) < 0. and u[i+1] > 0. and not numpy.isinf(u[i] / u[i+1])): ##### interpolate between nodes. Correct for origin not at step. p = pts[i][0] + (pts[i+1][0]-pts[i][0]) * (0.0-u[i]) / (u[i+1]-u[i]) -5.0 print 'p ', p ##### Ignore spurious corner points if(p>2): points = p ##### We have our first point on this plane so... break print "reattachment point found at: ", points ##### Append actual reattachment point and time: results.append([points,t]) return results ######################################################################### # Velocity profiles: def meanvelo(file,x,y): print "\nRunning velocity profile script on files at times...\n" ##### create array of points. Correct for origin not at step. 
pts=[] for i in range(len(x)): for j in range(len(y)): pts.append([x[i]+5.0, y[j], 0.0]) pts=numpy.array(pts) profiles=numpy.zeros([x.size, y.size], float) datafile = vtktools.vtu(file) ##### Get x-velocity uvw = datafile.ProbeData(pts, "AverageVelocity") u = uvw[:,0] u=u.reshape([x.size,y.size]) for i in range(len(x)): umax = max(u[i,:]) u[i,:] = u[i,:]/umax profiles[:,:] = u print "\n...Finished writing data files.\n" return profiles ######################################################################### def plot_length(type,reattachment_length): ##### Plot time series of reattachment length using pylab(matplotlib) ##### Kim's (1978) experimental result, Re=132000 kim = numpy.zeros([len(reattachment_length[:,1])]) kim[:]=7.0 ##### Ilinca's (1997) best numerical result (adaptive mesh 2, 6960 points) ilinca = numpy.zeros([len(reattachment_length[:,1])]) ilinca[:]=6.21 plot1 = pylab.figure() pylab.title("Time series of reattachment length: Re=132000, "+str(type)) pylab.xlabel('Time (s)') pylab.ylabel('Reattachment Length (L/h)') pylab.plot(reattachment_length[:,1], reattachment_length[:,0], marker = 'o', markerfacecolor='white', markersize=6, markeredgecolor='black', linestyle="solid",color='blue') pylab.plot(reattachment_length[:,1], kim, linestyle="solid",color='black') pylab.plot(reattachment_length[:,1], ilinca, linestyle="solid",color='red') pylab.legend(("Fluidity","Kim expt.","Ilinca sim."), loc="best") rlmax = max(reattachment_length[:,0]) pylab.axis([0, reattachment_length[-1,1], 0, max(kim[-1],rlmax)+0.1]) pylab.savefig("../reattachment_length_kim_"+str(type)+".pdf") return ######################################################################### def plot_meanvelo(type,profiles,xarray,yarray): ##### Plot evolution of velocity profiles at different points behind step # get profiles from Ilinca's experimental/numerical data datafile = open('../Ilinca-data/Ilinca-U-expt-1.33.dat', 'r') print "reading in data from file: Ilinca-U-expt-1.33.dat" y1=[];U1=[] for line in datafile: U1.append(float(line.split()[0])) y1.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-num-1.33.dat', 'r') print "reading in data from file: Ilinca-U-num-1.33.dat" yn1=[];Un1=[] for line in datafile: Un1.append(float(line.split()[0])) yn1.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-expt-2.66.dat', 'r') print "reading in data from file: Ilinca-U-expt-2.66.dat" y3=[];U3=[] for line in datafile: U3.append(float(line.split()[0])) y3.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-num-2.66.dat', 'r') print "reading in data from file: Ilinca-U-num-2.66.dat" yn3=[];Un3=[] for line in datafile: Un3.append(float(line.split()[0])) yn3.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-expt-5.33.dat', 'r') print "reading in data from file: Ilinca-U-expt-5.33.dat" y5=[];U5=[] for line in datafile: U5.append(float(line.split()[0])) y5.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-num-5.33.dat', 'r') print "reading in data from file: Ilinca-U-num-5.33.dat" yn5=[];Un5=[] for line in datafile: Un5.append(float(line.split()[0])) yn5.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-expt-8.0.dat', 'r') print "reading in data from file: Ilinca-U-expt-8.0.dat" y8=[];U8=[] for line in datafile: U8.append(float(line.split()[0])) y8.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-num-8.0.dat', 'r') print "reading in data from file: Ilinca-U-num-8.0.dat" yn8=[];Un8=[] 
for line in datafile: Un8.append(float(line.split()[0])) yn8.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-expt-16.0.dat', 'r') print "reading in data from file: Ilinca-U-expt-16.0.dat" y16=[];U16=[] for line in datafile: U16.append(float(line.split()[0])) y16.append(float(line.split()[1])) datafile = open('../Ilinca-data/Ilinca-U-num-16.0.dat', 'r') print "reading in data from file: Ilinca-U-num-16.0.dat" yn16=[];Un16=[] for line in datafile: Un16.append(float(line.split()[0])) yn16.append(float(line.split()[1])) plot1 = pylab.figure(figsize = (20, 8)) pylab.suptitle("U-velocity profile: Re=132000, "+str(type), fontsize=20) size = 15 ax = pylab.subplot(151) leg_end = [] ax.plot(profiles[0,:],yarray, linestyle="solid",color='blue', marker = 'o', markerfacecolor='white', markersize=8, markeredgecolor='black') ax.plot(U1,y1, linestyle='none',color='black', marker = 'x', markerfacecolor='black', markersize=8) ax.plot(Un1,yn1, linestyle="solid",color="red") leg_end.append("Fluidity") leg_end.append("Kim expt.") leg_end.append("Ilinca sim.") pylab.legend((leg_end), loc="lower right") leg = pylab.gca().get_legend() ltext = leg.get_texts() pylab.setp(ltext, fontsize = 18, color = 'black') frame=leg.get_frame() frame.set_fill(False) frame.set_visible(False) ax.set_title('(a) x/h='+str(xarray[0]), fontsize=16) for tick in ax.xaxis.get_major_ticks(): tick.label1.set_fontsize(size) for tick in ax.yaxis.get_major_ticks(): tick.label1.set_fontsize(size) bx = pylab.subplot(152, sharex=ax, sharey=ax) bx.plot(profiles[1,:],yarray, linestyle="solid",color='blue', marker = 'o', markerfacecolor='white', markersize=8, markeredgecolor='black') bx.plot(U3,y3, linestyle='none',color='black', marker = 'x', markerfacecolor='black', markersize=8) bx.plot(Un3,yn3, linestyle="solid",color='red') bx.set_title('(b) x/h='+str(xarray[1]), fontsize=16) for tick in bx.xaxis.get_major_ticks(): tick.label1.set_fontsize(size) pylab.setp(bx.get_yticklabels(), visible=False) cx = pylab.subplot(153, sharex=ax, sharey=ax) cx.plot(profiles[2,:],yarray, linestyle="solid",color='blue', marker = 'o', markerfacecolor='white', markersize=8, markeredgecolor='black') cx.plot(U5,y5, linestyle='none',color='black', marker = 'x', markerfacecolor='black', markersize=8) cx.plot(Un5,yn5, linestyle="solid",color='red') cx.set_title('(c) x/h='+str(xarray[2]), fontsize=16) for tick in cx.xaxis.get_major_ticks(): tick.label1.set_fontsize(size) pylab.setp(cx.get_yticklabels(), visible=False) dx = pylab.subplot(154, sharex=ax, sharey=ax) dx.plot(profiles[3,:],yarray, linestyle="solid",color='blue', marker = 'o', markerfacecolor='white', markersize=8, markeredgecolor='black') dx.plot(U8,y8, linestyle='none',color='black', marker = 'x', markerfacecolor='black', markersize=8) dx.plot(Un8,yn8, linestyle="solid",color='red') dx.set_title('(d) x/h='+str(xarray[3]), fontsize=16) for tick in dx.xaxis.get_major_ticks(): tick.label1.set_fontsize(size) pylab.setp(dx.get_yticklabels(), visible=False) ex = pylab.subplot(155, sharex=ax, sharey=ax) ex.plot(profiles[4,:],yarray, linestyle="solid",color='blue', marker = 'o', markerfacecolor='white', markersize=8, markeredgecolor='black') ex.plot(U16,y16, linestyle='none',color='black', marker = 'x', markerfacecolor='black', markersize=8) ex.plot(Un16,yn16, linestyle="solid",color='red') ex.set_title('(e) x/h='+str(xarray[4]), fontsize=16) for tick in ex.xaxis.get_major_ticks(): tick.label1.set_fontsize(size) pylab.setp(ex.get_yticklabels(), visible=False) pylab.axis([-0.25, 1.15, 0., 3.]) 
cx.set_xlabel('Normalised U-velocity (U/Umax)', fontsize=24) ax.set_ylabel('y', fontsize=24) pylab.savefig("../velocity_profiles_kim_"+str(type)+".pdf") return ######################################################################### def main(): ##### Which run is being processed? type = sys.argv[1] ##### Only process every nth file: filelist = get_filelist(sample=5, start=0) ##### Call reattachment_length function reatt_length = numpy.array(reattachment_length(filelist)) av_length = sum(reatt_length[:,0]) / len(reatt_length[:,0]) numpy.save("reattachment_length_kim_"+str(type), reatt_length) plot_length(type,reatt_length) ##### Points to generate profiles: xarray = numpy.array([1.33,2.66,5.33,8.0,16.0]) yarray = numpy.array([0.01,0.02,0.03,0.04,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9, 1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2.0,2.1,2.2,2.3,2.4, 2.5,2.6,2.7,2.8,2.9,2.95,2.96,2.97,2.98,2.99,3.0]) ##### Call meanvelo function profiles = meanvelo(filelist[-1], xarray, yarray) numpy.save("velocity_profiles_kim_"+str(type), profiles) plot_meanvelo(type,profiles,xarray,yarray) pylab.show() print "\nAll done.\n" if __name__ == "__main__": sys.exit(main())
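
#########################################################################
# Editor's sketch (not called by main): the sign-change interpolation used in
# reattachment_length() above, written as a standalone helper for clarity.
# The reattachment point is where the near-wall x-velocity changes sign from
# negative to positive; between two sample points the crossing is located by
# linear interpolation, exactly as in the loop above (the "-5.0" there simply
# shifts the origin to the step).
def zero_crossing(x0, x1, u0, u1):
    """Linearly interpolate the point where u changes sign between x0 and x1."""
    return x0 + (x1 - x0) * (0.0 - u0) / (u1 - u0)

# Example: u = -0.2 at x = 11.0 and u = 0.6 at x = 11.25 gives
# zero_crossing(11.0, 11.25, -0.2, 0.6) = 11.0625, i.e. 6.0625 step heights
# downstream once the 5.0 offset of the step is subtracted.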
lgpl-2.1
jenfly/monsoon-onset
scripts/save-dailyrel-regression.py
1
4364
import sys sys.path.append('/home/jwalker/dynamics/python/atmos-tools') sys.path.append('/home/jwalker/dynamics/python/atmos-read') import xarray as xray import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import collections import pandas as pd import atmos as atm import utils mpl.rcParams['font.size'] = 10 # ---------------------------------------------------------------------- onset_nm = 'CHP_MFC' years = np.arange(1979, 2015) datadir = atm.homedir() + 'datastore/merra/analysis/' varnms = ['U200', 'V200', 'T200', 'H200', 'U850', 'V850', 'T850', 'H850', 'THETA950', 'THETA_E950', 'QV950', 'T950', 'HFLUX', 'EFLUX', 'EVAP', 'precip'] regdays = [-60, -30, 0, 30, 60] seasons = ['JJAS', 'SSN'] lon1, lon2 = 60, 100 nroll = 5 datafiles = collections.OrderedDict() filestr = datadir + 'merra_%s_dailyrel_%s_%d.nc' for nm in varnms: datafiles[nm] = [filestr % (nm, onset_nm, yr) for yr in years] # ---------------------------------------------------------------------- # For each variable, read data, compute regressions and save to files def get_filenames(datadir, varnm, onset_nm, years, lon1, lon2, nroll): if nroll is not None: varnm = varnm + '_nroll%d' % nroll lonstr = atm.latlon_str(lon1, lon2, 'lon') yearstr = '_%d-%d.nc' % (min(years), max(years)) filestr = datadir + 'merra_%s_reg_%s_onset_' + onset_nm + yearstr filenames = {} filenames['latlon'] = filestr % (varnm, 'latlon') filenames['sector'] = filestr % (varnm, lonstr) return filenames def season_days(season, year, d_onset, d_retreat): if season == 'SSN': days = range(0, d_retreat - d_onset + 1) else: days = atm.season_days(season, atm.isleap(year)) - d_onset return days def ssn_average(var, onset, retreat, season): years = var['year'].values for y, year in enumerate(years): days = season_days(season, year, onset.values[y], retreat.values[y]) var_yr = atm.subset(var, {'year' : (year, year)}, squeeze=False) var_yr = var_yr.sel(dayrel=days).mean(dim='dayrel') if y == 0: var_out = var_yr else: var_out = xray.concat([var_out, var_yr], dim='year') return var_out def get_data(varnm, datafiles, regdays, seasons, lon1, lon2, nroll=None): var, onset, retreat = utils.load_dailyrel(datafiles[varnm]) if nroll is not None: var = atm.rolling_mean(var, nroll, axis=1, center=True) # Seasonal averages and daily lat-lon data data = xray.Dataset() for season in seasons: key = varnm + '_' + season data[key] = ssn_average(var, onset, retreat, season) # Daily data on regdays data[varnm + '_DAILY'] = var.sel(dayrel=regdays) # Sector mean data var_sector = atm.dim_mean(var, 'lon', lon1, lon2) alldata = {'data_latlon' : data, 'var_sector' : var_sector, 'onset' : onset, 'retreat' : retreat} return alldata def regress_data(alldata, indname='onset', axis=0): data = alldata['data_latlon'] var_sector = alldata['var_sector'] index = alldata[indname] print('Computing regressions') reg_data = xray.Dataset() for nm in data.data_vars: print(nm) reg = atm.regress_field(data[nm], index, axis) for nm2 in reg.data_vars: reg_data[nm + '_' + nm2] = reg[nm2] reg_sector = atm.regress_field(var_sector, index, axis) reg_out = {'latlon' : reg_data, 'sector' : reg_sector} return reg_out def process_one(varnm, datafiles, years, regdays, seasons, lon1, lon2, datadir, indname='onset', reg_axis=0, nroll=None): print('Processing ' + varnm) alldata = get_data(varnm, datafiles, regdays, seasons, lon1, lon2) reg_data = regress_data(alldata, indname=indname, axis=reg_axis) savefiles = get_filenames(datadir, varnm, onset_nm, years, lon1, lon2, nroll) for nm in reg_data: 
filenm = savefiles[nm] print('Saving to ' + filenm) ds = reg_data[nm] ds.to_netcdf(filenm) return alldata, reg_data # Iterate over each variable for varnm in varnms: data, reg_data = process_one(varnm, datafiles, years, regdays, seasons, lon1, lon2, datadir, indname='onset', reg_axis=0, nroll=nroll)
mit
liangz0707/scikit-learn
examples/plot_johnson_lindenstrauss_bound.py
127
7477
r""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma Theoretical bounds ================== The distortion introduced by a random projection `p` is asserted by the fact that `p` is defining an eps-embedding with good probability as defined by: .. math:: (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features] and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantees the eps-embedding is given by: .. math:: n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3) The first plot shows that with an increasing number of samples ``n_samples``, the minimal number of dimensions ``n_components`` increased logarithmically in order to guarantee an ``eps``-embedding. The second plot shows that an increase of the admissible distortion ``eps`` allows to reduce drastically the minimal number of dimensions ``n_components`` for a given number of samples ``n_samples`` Empirical validation ==================== We validate the above bounds on the the digits dataset or on the 20 newsgroups text document (TF-IDF word frequencies) dataset: - for the digits dataset, some 8x8 gray level pixels data for 500 handwritten digits pictures are randomly projected to spaces for various larger number of dimensions ``n_components``. - for the 20 newsgroups dataset some 500 documents with 100k features in total are projected using a sparse random matrix to smaller euclidean spaces with various values for the target number of dimensions ``n_components``. The default dataset is the digits dataset. To run the example on the twenty newsgroups dataset, pass the --twenty-newsgroups command line argument to this script. For each value of ``n_components``, we plot: - 2D distribution of sample pairs with pairwise distances in original and projected spaces as x and y axis respectively. - 1D histogram of the ratio of those distances (projected / original). We can see that for low values of ``n_components`` the distribution is wide with many distorted pairs and a skewed distribution (due to the hard limit of zero ratio on the left as distances are always positives) while for larger values of n_components the distortion is controlled and the distances are well preserved by the random projection. Remarks ======= According to the JL lemma, projecting 500 samples without too much distortion will require at least several thousands dimensions, irrespective of the number of features of the original dataset. Hence using random projections on the digits dataset which only has 64 features in the input space does not make sense: it does not allow for dimensionality reduction in this case. On the twenty newsgroups on the other hand the dimensionality can be decreased from 56436 down to 10000 while reasonably preserving pairwise distances. 
""" print(__doc__) import sys from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import SparseRandomProjection from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.datasets import load_digits from sklearn.metrics.pairwise import euclidean_distances # Part 1: plot the theoretical dependency between n_components_min and # n_samples # range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") # Part 2: perform sparse random projection of some digits images which are # quite low dimensional and dense or documents of the 20 newsgroups dataset # which is both high dimensional and sparse if '--twenty-newsgroups' in sys.argv: # Need an internet connection hence not enabled by default data = fetch_20newsgroups_vectorized().data[:500] else: data = load_digits().data[:500] n_samples, n_features = data.shape print("Embedding %d samples with dim %d using various random projections" % (n_samples, n_features)) n_components_range = np.array([300, 1000, 10000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print("Projected %d samples from %d to %d in %0.3fs" % (n_samples, n_features, n_components, time() - t0)) if hasattr(rp, 'components_'): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6)) projected_dists = euclidean_distances( projected_data, squared=True).ravel()[nonzero] plt.figure() plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % n_components) cb = plt.colorbar() cb.set_label('Sample pairs counts') rates = projected_dists / dists print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates))) plt.figure() plt.hist(rates, bins=50, normed=True, range=(0., 2.)) plt.xlabel("Squared distances rate: projected / original") 
plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show()
bsd-3-clause
JQIamo/artiq
doc/manual/conf.py
1
9407
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # ARTIQ documentation build configuration file, created by # sphinx-quickstart on Thu Sep 18 16:51:53 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import sphinx_rtd_theme from unittest.mock import MagicMock class Mock(MagicMock): @classmethod def __getattr__(cls, name): if name == "_mock_methods": return None return Mock() mock_modules = ["artiq.gui.moninj", "artiq.gui.waitingspinnerwidget", "artiq.gui.flowlayout", "quamash", "pyqtgraph", "matplotlib", "numpy", "dateutil", "dateutil.parser", "prettytable", "PyQt5", "h5py", "serial", "scipy", "scipy.interpolate", "asyncserial", "llvmlite_artiq", "Levenshtein", "aiohttp"] for module in mock_modules: sys.modules[module] = Mock() # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../..')) from artiq import __version__ as artiq_version # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinxarg.ext', 'sphinxcontrib.wavedrom', # see also below for config ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'ARTIQ' copyright = '2014-2017, M-Labs Limited' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = artiq_version.split('+', 1)[0] # The full version, including alpha/beta/rc tags. release = artiq_version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = '../logo/artiq_white.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ARTIQdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ARTIQ.tex', 'ARTIQ Documentation', 'M-Labs / NIST Ion Storage Group', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'artiq', 'ARTIQ Documentation', ['M-Labs / NIST Ion Storage Group'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ARTIQ', 'ARTIQ Documentation', 'M-Labs / NIST Ion Storage Group', 'ARTIQ', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for sphinxcontrib-wavedrom ----------------------------------- offline_skin_js_path = '_static/default.js' offline_wavedrom_js_path = '_static/WaveDrom.js'
lgpl-3.0
rohanp/scikit-learn
examples/linear_model/plot_lasso_coordinate_descent_path.py
42
2944
""" ===================== Lasso and Elastic Net ===================== Lasso and elastic net (L1 and L2 penalisation) implemented using a coordinate descent. The coefficients can be forced to be positive. """ print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause from itertools import cycle import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import lasso_path, enet_path from sklearn import datasets diabetes = datasets.load_diabetes() X = diabetes.data y = diabetes.target X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter) # Compute paths eps = 5e-3 # the smaller it is the longer is the path print("Computing regularization path using the lasso...") alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False) print("Computing regularization path using the positive lasso...") alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path( X, y, eps, positive=True, fit_intercept=False) print("Computing regularization path using the elastic net...") alphas_enet, coefs_enet, _ = enet_path( X, y, eps=eps, l1_ratio=0.8, fit_intercept=False) print("Computing regularization path using the positve elastic net...") alphas_positive_enet, coefs_positive_enet, _ = enet_path( X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False) # Display results plt.figure(1) ax = plt.gca() colors = cycle(['b', 'r', 'g', 'c', 'k']) neg_log_alphas_lasso = -np.log10(alphas_lasso) neg_log_alphas_enet = -np.log10(alphas_enet) for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors): l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c) l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Lasso and Elastic-Net Paths') plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left') plt.axis('tight') plt.figure(2) ax = plt.gca() neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso) for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors): l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c) l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Lasso and positive Lasso') plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left') plt.axis('tight') plt.figure(3) ax = plt.gca() neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet) for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors): l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c) l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c) plt.xlabel('-Log(alpha)') plt.ylabel('coefficients') plt.title('Elastic-Net and positive Elastic-Net') plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'), loc='lower left') plt.axis('tight') plt.show()
bsd-3-clause
jm-begon/scikit-learn
examples/applications/plot_prediction_latency.py
234
11277
""" ================== Prediction Latency ================== This is an example showing the prediction latency of various scikit-learn estimators. The goal is to measure the latency one can expect when doing predictions either in bulk or atomic (i.e. one by one) mode. The plots represent the distribution of the prediction latency as a boxplot. """ # Authors: Eustache Diemert <eustache@diemert.fr> # License: BSD 3 clause from __future__ import print_function from collections import defaultdict import time import gc import numpy as np import matplotlib.pyplot as plt from scipy.stats import scoreatpercentile from sklearn.datasets.samples_generator import make_regression from sklearn.ensemble.forest import RandomForestRegressor from sklearn.linear_model.ridge import Ridge from sklearn.linear_model.stochastic_gradient import SGDRegressor from sklearn.svm.classes import SVR def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return '__file__' in globals() def atomic_benchmark_estimator(estimator, X_test, verbose=False): """Measure runtime prediction of each instance.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_instances, dtype=np.float) for i in range(n_instances): instance = X_test[i, :] start = time.time() estimator.predict(instance) runtimes[i] = time.time() - start if verbose: print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile( runtimes, 50), max(runtimes)) return runtimes def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose): """Measure runtime prediction of the whole input.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_bulk_repeats, dtype=np.float) for i in range(n_bulk_repeats): start = time.time() estimator.predict(X_test) runtimes[i] = time.time() - start runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes))) if verbose: print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile( runtimes, 50), max(runtimes)) return runtimes def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False): """ Measure runtimes of prediction in both atomic and bulk mode. Parameters ---------- estimator : already trained estimator supporting `predict()` X_test : test input n_bulk_repeats : how many times to repeat when evaluating bulk mode Returns ------- atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the runtimes in seconds. """ atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose) bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose) return atomic_runtimes, bulk_runtimes def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False): """Generate a regression dataset with the given parameters.""" if verbose: print("generating dataset...") X, y, coef = make_regression(n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True) X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] idx = np.arange(n_train) np.random.seed(13) np.random.shuffle(idx) X_train = X_train[idx] y_train = y_train[idx] std = X_train.std(axis=0) mean = X_train.mean(axis=0) X_train = (X_train - mean) / std X_test = (X_test - mean) / std std = y_train.std(axis=0) mean = y_train.mean(axis=0) y_train = (y_train - mean) / std y_test = (y_test - mean) / std gc.collect() if verbose: print("ok") return X_train, y_train, X_test, y_test def boxplot_runtimes(runtimes, pred_type, configuration): """ Plot a new `Figure` with boxplots of prediction runtimes. 
    Parameters
    ----------
    runtimes : list of `np.array` of latencies in micro-seconds
    cls_names : list of estimator class names that generated the runtimes
    pred_type : 'bulk' or 'atomic'

    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    bp = plt.boxplot(runtimes)

    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label'])
                 for estimator_conf in configuration['estimators']]
    plt.setp(ax1, xticklabels=cls_infos)
    plt.setp(bp['boxes'], color='black')
    plt.setp(bp['whiskers'], color='black')
    plt.setp(bp['fliers'], color='red', marker='+')

    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
        pred_type.capitalize(), configuration['n_features']))
    ax1.set_ylabel('Prediction Time (us)')

    plt.show()


def benchmark(configuration):
    """Run the whole benchmark."""
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])

    stats = {}
    for estimator_conf in configuration['estimators']:
        print("Benchmarking", estimator_conf['instance'])
        estimator_conf['instance'].fit(X_train, y_train)
        gc.collect()
        a, b = benchmark_estimator(estimator_conf['instance'], X_test)
        stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}

    cls_names = [estimator_conf['name']
                 for estimator_conf in configuration['estimators']]
    runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
    boxplot_runtimes(runtimes, 'atomic', configuration)
    runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
    boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
                     configuration)


def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """
    Estimate influence of the number of features on prediction time.

    Parameters
    ----------
    estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
    n_features : list of feature-space dimensionality to test (int)
    percentile : percentile at which to measure the speed (int [0-100])

    Returns
    -------
    percentiles : dict(estimator_name,
                       dict(n_features, percentile_perf_in_us))

    """
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print("benchmarking with %d features" % n)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
        for cls_name, estimator in estimators.items():
            estimator.fit(X_train, y_train)
            gc.collect()
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
                                                               percentile)
    return percentiles


def plot_n_features_influence(percentiles, percentile):
    fig, ax1 = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    for i, cls_name in enumerate(percentiles.keys()):
        x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
        y = np.array([percentiles[cls_name][n] for n in x])
        plt.plot(x, y, color=colors[i])
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Evolution of Prediction Time with #Features')
    ax1.set_xlabel('#Features')
    ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
    plt.show()


def benchmark_throughputs(configuration, duration_secs=0.1):
    """Benchmark throughput for different estimators."""
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    throughputs = dict()
    for estimator_config in configuration['estimators']:
        estimator_config['instance'].fit(X_train, y_train)
        start_time = time.time()
        n_predictions = 0
        while (time.time() - start_time) < duration_secs:
            estimator_config['instance'].predict(X_test[0])
            n_predictions += 1
        throughputs[estimator_config['name']] = n_predictions / duration_secs
    return throughputs


def plot_benchmark_throughput(throughputs, configuration):
    fig, ax = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label'])
                 for estimator_conf in configuration['estimators']]
    cls_values = [throughputs[estimator_conf['name']]
                  for estimator_conf in configuration['estimators']]
    plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
    ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
    ax.set_xticklabels(cls_infos, fontsize=10)
    ymax = max(cls_values) * 1.2
    ax.set_ylim((0, ymax))
    ax.set_ylabel('Throughput (predictions/sec)')
    ax.set_title('Prediction Throughput for different estimators (%d '
                 'features)' % configuration['n_features'])
    plt.show()


###############################################################################
# main code

start_time = time.time()

# benchmark bulk/atomic prediction speed for various regressors
configuration = {
    'n_train': int(1e3),
    'n_test': int(1e2),
    'n_features': int(1e2),
    'estimators': [
        {'name': 'Linear Model',
         'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
                                  l1_ratio=0.25, fit_intercept=True),
         'complexity_label': 'non-zero coefficients',
         'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
        {'name': 'RandomForest',
         'instance': RandomForestRegressor(),
         'complexity_label': 'estimators',
         'complexity_computer': lambda clf: clf.n_estimators},
        {'name': 'SVR',
         'instance': SVR(kernel='rbf'),
         'complexity_label': 'support vectors',
         'complexity_computer': lambda clf: len(clf.support_vectors_)},
    ]
}
benchmark(configuration)

# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
                                  configuration['n_train'],
                                  configuration['n_test'],
                                  [100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)

# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)

stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
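# -----------------------------------------------------------------------------
# Editor's note: the function below is an illustrative sketch added by the
# editor, not part of the original example, and is never called by the script.
# It condenses the atomic-vs-bulk timing idea used above into one snippet;
# the names `_demo_atomic_vs_bulk_latency`, `atomic_us` and `bulk_us` are
# hypothetical, and the dataset/estimator choices are arbitrary.
def _demo_atomic_vs_bulk_latency():
    from sklearn.datasets import make_regression

    X_all, y_all = make_regression(n_samples=1000, n_features=100,
                                   random_state=0)
    X_tr, X_te, y_tr = X_all[:900], X_all[900:], y_all[:900]
    model = Ridge().fit(X_tr, y_tr)

    # atomic: one instance per predict() call
    atomic_us = []
    for i in range(X_te.shape[0]):
        t0 = time.time()
        model.predict(X_te[[i]])
        atomic_us.append(1e6 * (time.time() - t0))

    # bulk: the whole test set in one predict() call, reported per instance
    t0 = time.time()
    model.predict(X_te)
    bulk_us = 1e6 * (time.time() - t0) / X_te.shape[0]

    print("atomic p90: %.1f us, bulk per-instance: %.1f us"
          % (scoreatpercentile(atomic_us, 90), bulk_us))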
bsd-3-clause
ndingwall/scikit-learn
sklearn/model_selection/tests/test_split.py
6
63286
"""Test the split module""" import warnings import pytest import numpy as np from scipy.sparse import coo_matrix, csc_matrix, csr_matrix from scipy import stats from scipy.special import comb from itertools import combinations from itertools import combinations_with_replacement from itertools import permutations from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_raises_regexp from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import ignore_warnings from sklearn.utils.validation import _num_samples from sklearn.utils._mocking import MockDataFrame from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GroupKFold from sklearn.model_selection import TimeSeriesSplit from sklearn.model_selection import LeaveOneOut from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import PredefinedSplit from sklearn.model_selection import check_cv from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RepeatedKFold from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.linear_model import Ridge from sklearn.model_selection._split import _validate_shuffle_split from sklearn.model_selection._split import _build_repr from sklearn.model_selection._split import _yields_constant_splits from sklearn.datasets import load_digits from sklearn.datasets import make_classification from sklearn.svm import SVC X = np.ones(10) y = np.arange(10) // 2 P_sparse = coo_matrix(np.eye(5)) test_groups = ( np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3']) digits = load_digits() @ignore_warnings def test_cross_validator_with_default_params(): n_samples = 4 n_unique_groups = 4 n_splits = 2 p = 2 n_shuffle_splits = 10 # (the default value) X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) X_1d = np.array([1, 2, 3, 4]) y = np.array([1, 1, 2, 2]) groups = np.array([1, 2, 3, 4]) loo = LeaveOneOut() lpo = LeavePOut(p) kf = KFold(n_splits) skf = StratifiedKFold(n_splits) lolo = LeaveOneGroupOut() lopo = LeavePGroupsOut(p) ss = ShuffleSplit(random_state=0) ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = np of unique folds = 2 loo_repr = "LeaveOneOut()" lpo_repr = "LeavePOut(p=2)" kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)" skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)" lolo_repr = "LeaveOneGroupOut()" lopo_repr = "LeavePGroupsOut(n_groups=2)" ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, " "test_size=None, train_size=None)") ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))" n_splits_expected = [n_samples, comb(n_samples, p), 
n_splits, n_splits, n_unique_groups, comb(n_unique_groups, p), n_shuffle_splits, 2] for i, (cv, cv_repr) in enumerate(zip( [loo, lpo, kf, skf, lolo, lopo, ss, ps], [loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr, ss_repr, ps_repr])): # Test if get_n_splits works correctly assert n_splits_expected[i] == cv.get_n_splits(X, y, groups) # Test if the cross-validator works as expected even if # the data is 1d np.testing.assert_equal(list(cv.split(X, y, groups)), list(cv.split(X_1d, y, groups))) # Test that train, test indices returned are integers for train, test in cv.split(X, y, groups): assert np.asarray(train).dtype.kind == 'i' assert np.asarray(test).dtype.kind == 'i' # Test if the repr works without any errors assert cv_repr == repr(cv) # ValueError for get_n_splits methods msg = "The 'X' parameter should not be None." assert_raise_message(ValueError, msg, loo.get_n_splits, None, y, groups) assert_raise_message(ValueError, msg, lpo.get_n_splits, None, y, groups) def test_2d_y(): # smoke test for 2d y and multi-label n_samples = 30 rng = np.random.RandomState(1) X = rng.randint(0, 3, size=(n_samples, 2)) y = rng.randint(0, 3, size=(n_samples,)) y_2d = y.reshape(-1, 1) y_multilabel = rng.randint(0, 2, size=(n_samples, 3)) groups = rng.randint(0, 3, size=(n_samples,)) splitters = [LeaveOneOut(), LeavePOut(p=2), KFold(), StratifiedKFold(), RepeatedKFold(), RepeatedStratifiedKFold(), ShuffleSplit(), StratifiedShuffleSplit(test_size=.5), GroupShuffleSplit(), LeaveOneGroupOut(), LeavePGroupsOut(n_groups=2), GroupKFold(n_splits=3), TimeSeriesSplit(), PredefinedSplit(test_fold=groups)] for splitter in splitters: list(splitter.split(X, y, groups)) list(splitter.split(X, y_2d, groups)) try: list(splitter.split(X, y_multilabel, groups)) except ValueError as e: allowed_target_types = ('binary', 'multiclass') msg = "Supported target types are: {}. Got 'multilabel".format( allowed_target_types) assert msg in str(e) def check_valid_split(train, test, n_samples=None): # Use python sets to get more informative assertion failure messages train, test = set(train), set(test) # Train and test split should not overlap assert train.intersection(test) == set() if n_samples is not None: # Check that the union of train an test split cover all the indices assert train.union(test) == set(range(n_samples)) def check_cv_coverage(cv, X, y, groups, expected_n_splits): n_samples = _num_samples(X) # Check that a all the samples appear at least once in a test fold assert cv.get_n_splits(X, y, groups) == expected_n_splits collected_test_samples = set() iterations = 0 for train, test in cv.split(X, y, groups): check_valid_split(train, test, n_samples=n_samples) iterations += 1 collected_test_samples.update(test) # Check that the accumulated test samples cover the whole dataset assert iterations == expected_n_splits if n_samples is not None: assert collected_test_samples == set(range(n_samples)) def test_kfold_valueerrors(): X1 = np.array([[1, 2], [3, 4], [5, 6]]) X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]) # Check that errors are raised if there is not enough samples (ValueError, next, KFold(4).split(X1)) # Check that a warning is raised if the least populated class has too few # members. 
y = np.array([3, 3, -1, -1, 3]) skf_3 = StratifiedKFold(3) assert_warns_message(Warning, "The least populated class", next, skf_3.split(X2, y)) # Check that despite the warning the folds are still computed even # though all the classes are not necessarily represented at on each # side of the split at each split with warnings.catch_warnings(): warnings.simplefilter("ignore") check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3) # Check that errors are raised if all n_groups for individual # classes are less than n_splits. y = np.array([3, 3, -1, -1, 2]) assert_raises(ValueError, next, skf_3.split(X2, y)) # Error when number of folds is <= 1 assert_raises(ValueError, KFold, 0) assert_raises(ValueError, KFold, 1) error_string = ("k-fold cross-validation requires at least one" " train/test split") assert_raise_message(ValueError, error_string, StratifiedKFold, 0) assert_raise_message(ValueError, error_string, StratifiedKFold, 1) # When n_splits is not integer: assert_raises(ValueError, KFold, 1.5) assert_raises(ValueError, KFold, 2.0) assert_raises(ValueError, StratifiedKFold, 1.5) assert_raises(ValueError, StratifiedKFold, 2.0) # When shuffle is not a bool: assert_raises(TypeError, KFold, n_splits=4, shuffle=None) def test_kfold_indices(): # Check all indices are returned in the test folds X1 = np.ones(18) kf = KFold(3) check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3) # Check all indices are returned in the test folds even when equal-sized # folds are not possible X2 = np.ones(17) kf = KFold(3) check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3) # Check if get_n_splits returns the number of folds assert 5 == KFold(5).get_n_splits(X2) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] splits = KFold(2).split(X2[:-1]) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = KFold(2).split(X2) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible X, y = np.ones(4), [1, 1, 0, 0] splits = StratifiedKFold(2).split(X, y) train, test = next(splits) assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0] splits = StratifiedKFold(2).split(X, y) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) # Check if get_n_splits returns the number of folds assert 5 == StratifiedKFold(5).get_n_splits(X, y) # Make sure string labels are also supported X = np.ones(7) y1 = ['1', '1', '1', '0', '0', '0', '0'] y2 = [1, 1, 1, 0, 0, 0, 0] np.testing.assert_equal( list(StratifiedKFold(2).split(X, y1)), list(StratifiedKFold(2).split(X, y2))) # Check equivalence to KFold y = [0, 1, 0, 1, 0, 1, 0, 1] X = np.ones_like(y) np.testing.assert_equal( list(StratifiedKFold(3).split(X, y)), list(KFold(3).split(X, y))) 
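# --- Editor's illustrative sketch (added for clarity; not part of the --------
# original test module, and not collected by pytest). It restates the property
# exercised around here: StratifiedKFold keeps per-class counts as equal as
# possible in every test fold. The `_demo` names are hypothetical additions.
def _demo_stratified_fold_class_counts():
    # 6 samples of class 0 and 3 of class 1 split into 3 folds: every test
    # fold ends up with exactly 2 samples of class 0 and 1 of class 1.
    X_demo = np.ones(9)
    y_demo = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
    for _, test in StratifiedKFold(3).split(X_demo, y_demo):
        assert_array_equal(np.bincount(y_demo[test]), [2, 1])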
@pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('k', [4, 5, 6, 7, 8, 9, 10]) def test_stratified_kfold_ratios(k, shuffle): # Check that stratified kfold preserves class ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 X = np.ones(n_samples) y = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) distr = np.bincount(y) / len(y) test_sizes = [] random_state = None if not shuffle else 0 skf = StratifiedKFold(k, random_state=random_state, shuffle=shuffle) for train, test in skf.split(X, y): assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02) assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02) test_sizes.append(len(test)) assert np.ptp(test_sizes) <= 1 @pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('k', [4, 6, 7]) def test_stratified_kfold_label_invariance(k, shuffle): # Check that stratified kfold gives the same indices regardless of labels n_samples = 100 y = np.array([2] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) X = np.ones(len(y)) def get_splits(y): random_state = None if not shuffle else 0 return [(list(train), list(test)) for train, test in StratifiedKFold(k, random_state=random_state, shuffle=shuffle).split(X, y)] splits_base = get_splits(y) for perm in permutations([0, 1, 2]): y_perm = np.take(perm, y) splits_perm = get_splits(y_perm) assert splits_perm == splits_base def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for i in range(11, 17): kf = KFold(5).split(X=np.ones(i)) sizes = [len(test) for _, test in kf] assert (np.max(sizes) - np.min(sizes)) <= 1 assert np.sum(sizes) == i def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on X = np.ones(17) y = [0] * 3 + [1] * 14 for shuffle in (True, False): cv = StratifiedKFold(3, shuffle=shuffle) for i in range(11, 17): skf = cv.split(X[:i], y[:i]) sizes = [len(test) for _, test in skf] assert (np.max(sizes) - np.min(sizes)) <= 1 assert np.sum(sizes) == i def test_shuffle_kfold(): # Check the indices are shuffled properly kf = KFold(3) kf2 = KFold(3, shuffle=True, random_state=0) kf3 = KFold(3, shuffle=True, random_state=1) X = np.ones(300) all_folds = np.zeros(300) for (tr1, te1), (tr2, te2), (tr3, te3) in zip( kf.split(X), kf2.split(X), kf3.split(X)): for tr_a, tr_b in combinations((tr1, tr2, tr3), 2): # Assert that there is no complete overlap assert len(np.intersect1d(tr_a, tr_b)) != len(tr1) # Set all test indices in successive iterations of kf2 to 1 all_folds[te2] = 1 # Check that all indices are returned in the different test folds assert sum(all_folds) == 300 def test_shuffle_kfold_stratifiedkfold_reproducibility(): X = np.ones(15) # Divisible by 3 y = [0] * 7 + [1] * 8 X2 = np.ones(16) # Not divisible by 3 y2 = [0] * 8 + [1] * 8 # Check that when the shuffle is True, multiple split calls produce the # same split when random_state is int kf = KFold(3, shuffle=True, random_state=0) skf = StratifiedKFold(3, shuffle=True, random_state=0) for cv in (kf, skf): np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y))) np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2))) # Check that when the shuffle is True, multiple split calls often # (not always) produce different splits when random_state is # RandomState instance or None kf = KFold(3, shuffle=True, 
random_state=np.random.RandomState(0)) skf = StratifiedKFold(3, shuffle=True, random_state=np.random.RandomState(0)) for cv in (kf, skf): for data in zip((X, X2), (y, y2)): # Test if the two splits are different cv for (_, test_a), (_, test_b) in zip(cv.split(*data), cv.split(*data)): # cv.split(...) returns an array of tuples, each tuple # consisting of an array with train indices and test indices # Ensure that the splits for data are not same # when random state is not set with pytest.raises(AssertionError): np.testing.assert_array_equal(test_a, test_b) def test_shuffle_stratifiedkfold(): # Check that shuffling is happening when requested, and for proper # sample coverage X_40 = np.ones(40) y = [0] * 20 + [1] * 20 kf0 = StratifiedKFold(5, shuffle=True, random_state=0) kf1 = StratifiedKFold(5, shuffle=True, random_state=1) for (_, test0), (_, test1) in zip(kf0.split(X_40, y), kf1.split(X_40, y)): assert set(test0) != set(test1) check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5) # Ensure that we shuffle each class's samples with different # random_state in StratifiedKFold # See https://github.com/scikit-learn/scikit-learn/pull/13124 X = np.arange(10) y = [0] * 5 + [1] * 5 kf1 = StratifiedKFold(5, shuffle=True, random_state=0) kf2 = StratifiedKFold(5, shuffle=True, random_state=1) test_set1 = sorted([tuple(s[1]) for s in kf1.split(X, y)]) test_set2 = sorted([tuple(s[1]) for s in kf2.split(X, y)]) assert test_set1 != test_set2 def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 # The digits samples are dependent: they are apparently grouped by authors # although we don't have any information on the groups segment locations # for this data. We can highlight this fact by computing k-fold cross- # validation with and without shuffling: we observe that the shuffling case # wrongly makes the IID assumption and is therefore too optimistic: it # estimates a much higher accuracy (around 0.93) than that the non # shuffling variant (around 0.81). X, y = digits.data[:600], digits.target[:600] model = SVC(C=10, gamma=0.005) n_splits = 3 cv = KFold(n_splits=n_splits, shuffle=False) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert 0.92 > mean_score assert mean_score > 0.80 # Shuffling the data artificially breaks the dependency and hides the # overfitting of the model with regards to the writing style of the authors # by yielding a seriously overestimated score: cv = KFold(n_splits, shuffle=True, random_state=0) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert mean_score > 0.92 cv = KFold(n_splits, shuffle=True, random_state=1) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert mean_score > 0.92 # Similarly, StratifiedKFold should try to shuffle the data as little # as possible (while respecting the balanced class constraints) # and thus be able to detect the dependency by not overestimating # the CV score either. 
As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = StratifiedKFold(n_splits) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert 0.94 > mean_score assert mean_score > 0.80 def test_shuffle_split(): ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X) ss2 = ShuffleSplit(test_size=2, random_state=0).split(X) ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X) ss4 = ShuffleSplit(test_size=int(2), random_state=0).split(X) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) @pytest.mark.parametrize("split_class", [ShuffleSplit, StratifiedShuffleSplit]) @pytest.mark.parametrize("train_size, exp_train, exp_test", [(None, 9, 1), (8, 8, 2), (0.8, 8, 2)]) def test_shuffle_split_default_test_size(split_class, train_size, exp_train, exp_test): # Check that the default value has the expected behavior, i.e. 0.1 if both # unspecified or complement train_size unless both are specified. X = np.ones(10) y = np.ones(10) X_train, X_test = next(split_class(train_size=train_size).split(X, y)) assert len(X_train) == exp_train assert len(X_test) == exp_test @pytest.mark.parametrize("train_size, exp_train, exp_test", [(None, 8, 2), (7, 7, 3), (0.7, 7, 3)]) def test_group_shuffle_split_default_test_size(train_size, exp_train, exp_test): # Check that the default value has the expected behavior, i.e. 0.2 if both # unspecified or complement train_size unless both are specified. X = np.ones(10) y = np.ones(10) groups = range(10) X_train, X_test = next(GroupShuffleSplit(train_size=train_size) .split(X, y, groups)) assert len(X_train) == exp_train assert len(X_test) == exp_test @ignore_warnings def test_stratified_shuffle_split_init(): X = np.arange(7) y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, next, StratifiedShuffleSplit(3, 0.2).split(X, y)) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y)) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, next, StratifiedShuffleSplit(3, 3, 2).split(X, y)) X = np.arange(9) y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) # Train size or test size too small assert_raises(ValueError, next, StratifiedShuffleSplit(train_size=2).split(X, y)) assert_raises(ValueError, next, StratifiedShuffleSplit(test_size=2).split(X, y)) def test_stratified_shuffle_split_respects_test_size(): y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]) test_size = 5 train_size = 10 sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size, random_state=0).split(np.ones(len(y)), y) for train, test in sss: assert len(train) == train_size assert len(test) == test_size def test_stratified_shuffle_split_iter(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), np.array([-1] * 800 + [1] * 50), np.concatenate([[i] * (100 + i) for i in range(11)]), [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'], ] for y in ys: sss = 
StratifiedShuffleSplit(6, test_size=0.33, random_state=0).split(np.ones(len(y)), y) y = np.asanyarray(y) # To make it indexable for y[train] # this is how test-size is computed internally # in _validate_shuffle_split test_size = np.ceil(0.33 * len(y)) train_size = len(y) - test_size for train, test in sss: assert_array_equal(np.unique(y[train]), np.unique(y[test])) # Checks if folds keep classes proportions p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(len(y[train]))) p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(len(y[test]))) assert_array_almost_equal(p_train, p_test, 1) assert len(train) + len(test) == y.size assert len(train) == train_size assert len(test) == test_size assert_array_equal(np.lib.arraysetops.intersect1d(train, test), []) def test_stratified_shuffle_split_even(): # Test the StratifiedShuffleSplit, indices are drawn with a # equal chance n_folds = 5 n_splits = 1000 def assert_counts_are_ok(idx_counts, p): # Here we test that the distribution of the counts # per index is close enough to a binomial threshold = 0.05 / n_splits bf = stats.binom(n_splits, p) for count in idx_counts: prob = bf.pmf(count) assert prob > threshold, \ "An index is not drawn with chance corresponding to even draws" for n_samples in (6, 22): groups = np.array((n_samples // 2) * [0, 1]) splits = StratifiedShuffleSplit(n_splits=n_splits, test_size=1. / n_folds, random_state=0) train_counts = [0] * n_samples test_counts = [0] * n_samples n_splits_actual = 0 for train, test in splits.split(X=np.ones(n_samples), y=groups): n_splits_actual += 1 for counter, ids in [(train_counts, train), (test_counts, test)]: for id in ids: counter[id] += 1 assert n_splits_actual == n_splits n_train, n_test = _validate_shuffle_split( n_samples, test_size=1. / n_folds, train_size=1. - (1. 
/ n_folds)) assert len(train) == n_train assert len(test) == n_test assert len(set(train).intersection(test)) == 0 group_counts = np.unique(groups) assert splits.test_size == 1.0 / n_folds assert n_train + n_test == len(groups) assert len(group_counts) == 2 ex_test_p = float(n_test) / n_samples ex_train_p = float(n_train) / n_samples assert_counts_are_ok(train_counts, ex_train_p) assert_counts_are_ok(test_counts, ex_test_p) def test_stratified_shuffle_split_overlap_train_test_bug(): # See https://github.com/scikit-learn/scikit-learn/issues/6121 for # the original bug report y = [0, 1, 2, 3] * 3 + [4, 5] * 5 X = np.ones_like(y) sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) train, test = next(sss.split(X=X, y=y)) # no overlap assert_array_equal(np.intersect1d(train, test), []) # complete partition assert_array_equal(np.union1d(train, test), np.arange(len(y))) def test_stratified_shuffle_split_multilabel(): # fix for issue 9037 for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]), np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]: X = np.ones_like(y) sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) train, test = next(sss.split(X=X, y=y)) y_train = y[train] y_test = y[test] # no overlap assert_array_equal(np.intersect1d(train, test), []) # complete partition assert_array_equal(np.union1d(train, test), np.arange(len(y))) # correct stratification of entire rows # (by design, here y[:, 0] uniquely determines the entire row of y) expected_ratio = np.mean(y[:, 0]) assert expected_ratio == np.mean(y_train[:, 0]) assert expected_ratio == np.mean(y_test[:, 0]) def test_stratified_shuffle_split_multilabel_many_labels(): # fix in PR #9922: for multilabel data with > 1000 labels, str(row) # truncates with an ellipsis for elements in positions 4 through # len(row) - 4, so labels were not being correctly split using the powerset # method for transforming a multilabel problem to a multiclass one; this # test checks that this problem is fixed. row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1] row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1] y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100) X = np.ones_like(y) sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) train, test = next(sss.split(X=X, y=y)) y_train = y[train] y_test = y[test] # correct stratification of entire rows # (by design, here y[:, 4] uniquely determines the entire row of y) expected_ratio = np.mean(y[:, 4]) assert expected_ratio == np.mean(y_train[:, 4]) assert expected_ratio == np.mean(y_test[:, 4]) def test_predefinedsplit_with_kfold_split(): # Check that PredefinedSplit can reproduce a split generated by Kfold. folds = np.full(10, -1.) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps = PredefinedSplit(folds) # n_splits is simply the no of unique folds assert len(np.unique(folds)) == ps.get_n_splits() ps_train, ps_test = zip(*ps.split()) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_group_shuffle_split(): for groups_i in test_groups: X = y = np.ones(len(groups_i)) n_splits = 6 test_size = 1. 
/ 3 slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0) # Make sure the repr works repr(slo) # Test that the length is correct assert slo.get_n_splits(X, y, groups=groups_i) == n_splits l_unique = np.unique(groups_i) l = np.asarray(groups_i) for train, test in slo.split(X, y, groups=groups_i): # First test: no train group is in the test set and vice versa l_train_unique = np.unique(l[train]) l_test_unique = np.unique(l[test]) assert not np.any(np.in1d(l[train], l_test_unique)) assert not np.any(np.in1d(l[test], l_train_unique)) # Second test: train and test add up to all the data assert l[train].size + l[test].size == l.size # Third test: train and test are disjoint assert_array_equal(np.intersect1d(train, test), []) # Fourth test: # unique train and test groups are correct, +- 1 for rounding error assert abs(len(l_test_unique) - round(test_size * len(l_unique))) <= 1 assert abs(len(l_train_unique) - round((1.0 - test_size) * len(l_unique))) <= 1 def test_leave_one_p_group_out(): logo = LeaveOneGroupOut() lpgo_1 = LeavePGroupsOut(n_groups=1) lpgo_2 = LeavePGroupsOut(n_groups=2) # Make sure the repr works assert repr(logo) == 'LeaveOneGroupOut()' assert repr(lpgo_1) == 'LeavePGroupsOut(n_groups=1)' assert repr(lpgo_2) == 'LeavePGroupsOut(n_groups=2)' assert (repr(LeavePGroupsOut(n_groups=3)) == 'LeavePGroupsOut(n_groups=3)') for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1), (lpgo_2, 2))): for i, groups_i in enumerate(test_groups): n_groups = len(np.unique(groups_i)) n_splits = (n_groups if p_groups_out == 1 else n_groups * (n_groups - 1) / 2) X = y = np.ones(len(groups_i)) # Test that the length is correct assert cv.get_n_splits(X, y, groups=groups_i) == n_splits groups_arr = np.asarray(groups_i) # Split using the original list / array / list of string groups_i for train, test in cv.split(X, y, groups=groups_i): # First test: no train group is in the test set and vice versa assert_array_equal(np.intersect1d(groups_arr[train], groups_arr[test]).tolist(), []) # Second test: train and test add up to all the data assert len(train) + len(test) == len(groups_i) # Third test: # The number of groups in test must be equal to p_groups_out assert np.unique(groups_arr[test]).shape[0], p_groups_out # check get_n_splits() with dummy parameters assert logo.get_n_splits(None, None, ['a', 'b', 'c', 'b', 'c']) == 3 assert logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]) == 3 assert lpgo_2.get_n_splits(None, None, np.arange(4)) == 6 assert lpgo_1.get_n_splits(groups=np.arange(4)) == 4 # raise ValueError if a `groups` parameter is illegal with assert_raises(ValueError): logo.get_n_splits(None, None, [0.0, np.nan, 0.0]) with assert_raises(ValueError): lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0]) msg = "The 'groups' parameter should not be None." 
assert_raise_message(ValueError, msg, logo.get_n_splits, None, None, None) assert_raise_message(ValueError, msg, lpgo_1.get_n_splits, None, None, None) def test_leave_group_out_changing_groups(): # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if # the groups variable is changed before calling split groups = np.array([0, 1, 2, 1, 1, 2, 0, 0]) X = np.ones(len(groups)) groups_changing = np.array(groups, copy=True) lolo = LeaveOneGroupOut().split(X, groups=groups) lolo_changing = LeaveOneGroupOut().split(X, groups=groups) lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups) lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups) groups_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3 assert ( 3 == LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X, groups=groups)) # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups) assert 3 == LeaveOneGroupOut().get_n_splits(X, y=X, groups=groups) def test_leave_one_p_group_out_error_on_fewer_number_of_groups(): X = y = groups = np.ones(0) assert_raise_message(ValueError, "Found array with 0 sample(s)", next, LeaveOneGroupOut().split(X, y, groups)) X = y = groups = np.ones(1) msg = ("The groups parameter contains fewer than 2 unique groups ({}). " "LeaveOneGroupOut expects at least 2.").format(groups) assert_raise_message(ValueError, msg, next, LeaveOneGroupOut().split(X, y, groups)) X = y = groups = np.ones(1) msg = ("The groups parameter contains fewer than (or equal to) n_groups " "(3) numbers of unique groups ({}). LeavePGroupsOut expects " "that at least n_groups + 1 (4) unique groups " "be present").format(groups) assert_raise_message(ValueError, msg, next, LeavePGroupsOut(n_groups=3).split(X, y, groups)) X = y = groups = np.arange(3) msg = ("The groups parameter contains fewer than (or equal to) n_groups " "(3) numbers of unique groups ({}). 
LeavePGroupsOut expects " "that at least n_groups + 1 (4) unique groups " "be present").format(groups) assert_raise_message(ValueError, msg, next, LeavePGroupsOut(n_groups=3).split(X, y, groups)) @ignore_warnings def test_repeated_cv_value_errors(): # n_repeats is not integer or <= 0 for cv in (RepeatedKFold, RepeatedStratifiedKFold): assert_raises(ValueError, cv, n_repeats=0) assert_raises(ValueError, cv, n_repeats=1.5) @pytest.mark.parametrize( "RepeatedCV", [RepeatedKFold, RepeatedStratifiedKFold] ) def test_repeated_cv_repr(RepeatedCV): n_splits, n_repeats = 2, 6 repeated_cv = RepeatedCV(n_splits=n_splits, n_repeats=n_repeats) repeated_cv_repr = ('{}(n_repeats=6, n_splits=2, random_state=None)' .format(repeated_cv.__class__.__name__)) assert repeated_cv_repr == repr(repeated_cv) def test_repeated_kfold_determinstic_split(): X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] random_state = 258173307 rkf = RepeatedKFold( n_splits=2, n_repeats=2, random_state=random_state) # split should produce same and deterministic splits on # each call for _ in range(3): splits = rkf.split(X) train, test = next(splits) assert_array_equal(train, [2, 4]) assert_array_equal(test, [0, 1, 3]) train, test = next(splits) assert_array_equal(train, [0, 1, 3]) assert_array_equal(test, [2, 4]) train, test = next(splits) assert_array_equal(train, [0, 1]) assert_array_equal(test, [2, 3, 4]) train, test = next(splits) assert_array_equal(train, [2, 3, 4]) assert_array_equal(test, [0, 1]) assert_raises(StopIteration, next, splits) def test_get_n_splits_for_repeated_kfold(): n_splits = 3 n_repeats = 4 rkf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats) expected_n_splits = n_splits * n_repeats assert expected_n_splits == rkf.get_n_splits() def test_get_n_splits_for_repeated_stratified_kfold(): n_splits = 3 n_repeats = 4 rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats) expected_n_splits = n_splits * n_repeats assert expected_n_splits == rskf.get_n_splits() def test_repeated_stratified_kfold_determinstic_split(): X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] y = [1, 1, 1, 0, 0] random_state = 1944695409 rskf = RepeatedStratifiedKFold( n_splits=2, n_repeats=2, random_state=random_state) # split should produce same and deterministic splits on # each call for _ in range(3): splits = rskf.split(X, y) train, test = next(splits) assert_array_equal(train, [1, 4]) assert_array_equal(test, [0, 2, 3]) train, test = next(splits) assert_array_equal(train, [0, 2, 3]) assert_array_equal(test, [1, 4]) train, test = next(splits) assert_array_equal(train, [2, 3]) assert_array_equal(test, [0, 1, 4]) train, test = next(splits) assert_array_equal(train, [0, 1, 4]) assert_array_equal(test, [2, 3]) assert_raises(StopIteration, next, splits) def test_train_test_split_errors(): pytest.raises(ValueError, train_test_split) pytest.raises(ValueError, train_test_split, range(3), train_size=1.1) pytest.raises(ValueError, train_test_split, range(3), test_size=0.6, train_size=0.6) pytest.raises(ValueError, train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) pytest.raises(ValueError, train_test_split, range(3), test_size="wrong_type") pytest.raises(ValueError, train_test_split, range(3), test_size=2, train_size=4) pytest.raises(TypeError, train_test_split, range(3), some_argument=1.1) pytest.raises(ValueError, train_test_split, range(3), range(42)) pytest.raises(ValueError, train_test_split, range(10), shuffle=False, stratify=True) with pytest.raises(ValueError, match=r'train_size=11 should be 
either positive and ' r'smaller than the number of samples 10 or a ' r'float in the \(0, 1\) range'): train_test_split(range(10), train_size=11, test_size=1) @pytest.mark.parametrize("train_size,test_size", [ (1.2, 0.8), (1., 0.8), (0.0, 0.8), (-.2, 0.8), (0.8, 1.2), (0.8, 1.), (0.8, 0.), (0.8, -.2)]) def test_train_test_split_invalid_sizes1(train_size, test_size): with pytest.raises(ValueError, match=r'should be .* in the \(0, 1\) range'): train_test_split(range(10), train_size=train_size, test_size=test_size) @pytest.mark.parametrize("train_size,test_size", [ (-10, 0.8), (0, 0.8), (11, 0.8), (0.8, -10), (0.8, 0), (0.8, 11)]) def test_train_test_split_invalid_sizes2(train_size, test_size): with pytest.raises(ValueError, match=r'should be either positive and smaller'): train_test_split(range(10), train_size=train_size, test_size=test_size) @pytest.mark.parametrize("train_size, exp_train, exp_test", [(None, 7, 3), (8, 8, 2), (0.8, 8, 2)]) def test_train_test_split_default_test_size(train_size, exp_train, exp_test): # Check that the default value has the expected behavior, i.e. complement # train_size unless both are specified. X_train, X_test = train_test_split(X, train_size=train_size) assert len(X_train) == exp_train assert len(X_test) == exp_test def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert len(y_test) == len(y_train) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # don't convert lists to anything else by default split = train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert isinstance(y_train, list) assert isinstance(y_test, list) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = train_test_split(X_4d, y_3d) assert split[0].shape == (7, 5, 3, 2) assert split[1].shape == (3, 5, 3, 2) assert split[2].shape == (7, 7, 11) assert split[3].shape == (3, 7, 11) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert len(test) == exp_test_size assert len(test) + len(train) == len(y) # check the 1:1 ratio of ones and twos in the data is preserved assert np.sum(train == 1) == np.sum(train == 2) # test unshuffled split y = np.arange(10) for test_size in [2, 0.2]: train, test = train_test_split(y, shuffle=False, test_size=test_size) assert_array_equal(test, [8, 9]) assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7]) @ignore_warnings def test_train_test_split_pandas(): # check train_test_split doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = train_test_split(X_df) assert isinstance(X_train, InputFeatureType) assert isinstance(X_test, InputFeatureType) def test_train_test_split_sparse(): # check that train_test_split converts scipy sparse matrices # to csr, as stated in the documentation X = np.arange(100).reshape((10, 10)) sparse_types = [csr_matrix, csc_matrix, coo_matrix] for InputFeatureType in sparse_types: X_s = InputFeatureType(X) X_train, X_test = 
train_test_split(X_s) assert isinstance(X_train, csr_matrix) assert isinstance(X_test, csr_matrix) def test_train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = train_test_split(X_df) assert isinstance(X_train, MockDataFrame) assert isinstance(X_test, MockDataFrame) X_train_arr, X_test_arr = train_test_split(X_df) def test_train_test_split_list_input(): # Check that when y is a list / list of string labels, it works. X = np.ones(7) y1 = ['1'] * 4 + ['0'] * 3 y2 = np.hstack((np.ones(4), np.zeros(3))) y3 = y2.tolist() for stratify in (True, False): X_train1, X_test1, y_train1, y_test1 = train_test_split( X, y1, stratify=y1 if stratify else None, random_state=0) X_train2, X_test2, y_train2, y_test2 = train_test_split( X, y2, stratify=y2 if stratify else None, random_state=0) X_train3, X_test3, y_train3, y_test3 = train_test_split( X, y3, stratify=y3 if stratify else None, random_state=0) np.testing.assert_equal(X_train1, X_train2) np.testing.assert_equal(y_train2, y_train3) np.testing.assert_equal(X_test1, X_test3) np.testing.assert_equal(y_test3, y_test2) @pytest.mark.parametrize("test_size, train_size", [(2.0, None), (1.0, None), (0.1, 0.95), (None, 1j), (11, None), (10, None), (8, 3)]) def test_shufflesplit_errors(test_size, train_size): with pytest.raises(ValueError): next(ShuffleSplit(test_size=test_size, train_size=train_size).split(X)) def test_shufflesplit_reproducible(): # Check that iterating twice on the ShuffleSplit gives the same # sequence of train-test when the random_state is given ss = ShuffleSplit(random_state=21) assert_array_equal(list(a for a, b in ss.split(X)), list(a for a, b in ss.split(X))) def test_stratifiedshufflesplit_list_input(): # Check that when y is a list / list of string labels, it works. 
sss = StratifiedShuffleSplit(test_size=2, random_state=42) X = np.ones(7) y1 = ['1'] * 4 + ['0'] * 3 y2 = np.hstack((np.ones(4), np.zeros(3))) y3 = y2.tolist() np.testing.assert_equal(list(sss.split(X, y1)), list(sss.split(X, y2))) np.testing.assert_equal(list(sss.split(X, y3)), list(sss.split(X, y2))) def test_train_test_split_allow_nans(): # Check that train_test_split allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) train_test_split(X, y, test_size=0.2, random_state=42) def test_check_cv(): X = np.ones(9) cv = check_cv(3, classifier=False) # Use numpy.testing.assert_equal which recursively compares # lists of lists np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) cv = check_cv(3, y_binary, classifier=True) np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)), list(cv.split(X, y_binary))) y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) cv = check_cv(3, y_multiclass, classifier=True) np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)), list(cv.split(X, y_multiclass))) # also works with 2d multiclass y_multiclass_2d = y_multiclass.reshape(-1, 1) cv = check_cv(3, y_multiclass_2d, classifier=True) np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass_2d)), list(cv.split(X, y_multiclass_2d))) assert not np.all( next(StratifiedKFold(3).split(X, y_multiclass_2d))[0] == next(KFold(3).split(X, y_multiclass_2d))[0]) X = np.ones(5) y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 1], [0, 0, 1, 0]]) cv = check_cv(3, y_multilabel, classifier=True) np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) cv = check_cv(3, y_multioutput, classifier=True) np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) assert_raises(ValueError, check_cv, cv="lolo") def test_cv_iterable_wrapper(): kf_iter = KFold().split(X, y) kf_iter_wrapped = check_cv(kf_iter) # Since the wrapped iterable is enlisted and stored, # split can be called any number of times to produce # consistent results. 
np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)), list(kf_iter_wrapped.split(X, y))) # If the splits are randomized, successive calls to split yields different # results kf_randomized_iter = KFold(shuffle=True, random_state=0).split(X, y) kf_randomized_iter_wrapped = check_cv(kf_randomized_iter) # numpy's assert_array_equal properly compares nested lists np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)), list(kf_randomized_iter_wrapped.split(X, y))) try: splits_are_equal = True np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)), list(kf_randomized_iter_wrapped.split(X, y))) except AssertionError: splits_are_equal = False assert not splits_are_equal, ( "If the splits are randomized, " "successive calls to split should yield different results") def test_group_kfold(): rng = np.random.RandomState(0) # Parameters of the test n_groups = 15 n_samples = 1000 n_splits = 5 X = y = np.ones(n_samples) # Construct the test data tolerance = 0.05 * n_samples # 5 percent error allowed groups = rng.randint(0, n_groups, n_samples) ideal_n_groups_per_fold = n_samples // n_splits len(np.unique(groups)) # Get the test fold indices from the test set indices of each fold folds = np.zeros(n_samples) lkf = GroupKFold(n_splits=n_splits) for i, (_, test) in enumerate(lkf.split(X, y, groups)): folds[test] = i # Check that folds have approximately the same size assert len(folds) == len(groups) for i in np.unique(folds): assert (tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold)) # Check that each group appears only in 1 fold for group in np.unique(groups): assert len(np.unique(folds[groups == group])) == 1 # Check that no group is on both sides of the split groups = np.asarray(groups, dtype=object) for train, test in lkf.split(X, y, groups): assert len(np.intersect1d(groups[train], groups[test])) == 0 # Construct the test data groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean', 'Francis', 'Robert', 'Michel', 'Rachel', 'Lois', 'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean', 'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix', 'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky', 'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']) n_groups = len(np.unique(groups)) n_samples = len(groups) n_splits = 5 tolerance = 0.05 * n_samples # 5 percent error allowed ideal_n_groups_per_fold = n_samples // n_splits X = y = np.ones(n_samples) # Get the test fold indices from the test set indices of each fold folds = np.zeros(n_samples) for i, (_, test) in enumerate(lkf.split(X, y, groups)): folds[test] = i # Check that folds have approximately the same size assert len(folds) == len(groups) for i in np.unique(folds): assert (tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold)) # Check that each group appears only in 1 fold with warnings.catch_warnings(): warnings.simplefilter("ignore", FutureWarning) for group in np.unique(groups): assert len(np.unique(folds[groups == group])) == 1 # Check that no group is on both sides of the split groups = np.asarray(groups, dtype=object) for train, test in lkf.split(X, y, groups): assert len(np.intersect1d(groups[train], groups[test])) == 0 # groups can also be a list cv_iter = list(lkf.split(X, y, groups.tolist())) for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups), cv_iter): assert_array_equal(train1, train2) assert_array_equal(test1, test2) # Should fail if there are more folds than groups groups = np.array([1, 1, 1, 2, 2]) X = y = np.ones(len(groups)) 
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater", next, GroupKFold(n_splits=3).split(X, y, groups)) def test_time_series_cv(): X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]] # Should fail if there are more folds than samples assert_raises_regexp(ValueError, "Cannot have number of folds.*greater", next, TimeSeriesSplit(n_splits=7).split(X)) tscv = TimeSeriesSplit(2) # Manually check that Time Series CV preserves the data # ordering on toy datasets splits = tscv.split(X[:-1]) train, test = next(splits) assert_array_equal(train, [0, 1]) assert_array_equal(test, [2, 3]) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3]) assert_array_equal(test, [4, 5]) splits = TimeSeriesSplit(2).split(X) train, test = next(splits) assert_array_equal(train, [0, 1, 2]) assert_array_equal(test, [3, 4]) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3, 4]) assert_array_equal(test, [5, 6]) # Check get_n_splits returns the correct number of splits splits = TimeSeriesSplit(2).split(X) n_splits_actual = len(list(splits)) assert n_splits_actual == tscv.get_n_splits() assert n_splits_actual == 2 def _check_time_series_max_train_size(splits, check_splits, max_train_size): for (train, test), (check_train, check_test) in zip(splits, check_splits): assert_array_equal(test, check_test) assert len(check_train) <= max_train_size suffix_start = max(len(train) - max_train_size, 0) assert_array_equal(check_train, train[suffix_start:]) def test_time_series_max_train_size(): X = np.zeros((6, 1)) splits = TimeSeriesSplit(n_splits=3).split(X) check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X) _check_time_series_max_train_size(splits, check_splits, max_train_size=3) # Test for the case where the size of a fold is greater than max_train_size check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X) _check_time_series_max_train_size(splits, check_splits, max_train_size=2) # Test for the case where the size of each fold is less than max_train_size check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X) _check_time_series_max_train_size(splits, check_splits, max_train_size=2) def test_time_series_test_size(): X = np.zeros((10, 1)) # Test alone splits = TimeSeriesSplit(n_splits=3, test_size=3).split(X) train, test = next(splits) assert_array_equal(train, [0]) assert_array_equal(test, [1, 2, 3]) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3]) assert_array_equal(test, [4, 5, 6]) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6]) assert_array_equal(test, [7, 8, 9]) # Test with max_train_size splits = TimeSeriesSplit(n_splits=2, test_size=2, max_train_size=4).split(X) train, test = next(splits) assert_array_equal(train, [2, 3, 4, 5]) assert_array_equal(test, [6, 7]) train, test = next(splits) assert_array_equal(train, [4, 5, 6, 7]) assert_array_equal(test, [8, 9]) # Should fail with not enough data points for configuration with pytest.raises(ValueError, match="Too many splits.*with test_size"): splits = TimeSeriesSplit(n_splits=5, test_size=2).split(X) next(splits) def test_time_series_gap(): X = np.zeros((10, 1)) # Test alone splits = TimeSeriesSplit(n_splits=2, gap=2).split(X) train, test = next(splits) assert_array_equal(train, [0, 1]) assert_array_equal(test, [4, 5, 6]) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3, 4]) assert_array_equal(test, [7, 8, 9]) # Test with max_train_size splits = TimeSeriesSplit(n_splits=3, gap=2, max_train_size=2).split(X) train, 
test = next(splits) assert_array_equal(train, [0, 1]) assert_array_equal(test, [4, 5]) train, test = next(splits) assert_array_equal(train, [2, 3]) assert_array_equal(test, [6, 7]) train, test = next(splits) assert_array_equal(train, [4, 5]) assert_array_equal(test, [8, 9]) # Test with test_size splits = TimeSeriesSplit(n_splits=2, gap=2, max_train_size=4, test_size=2).split(X) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3]) assert_array_equal(test, [6, 7]) train, test = next(splits) assert_array_equal(train, [2, 3, 4, 5]) assert_array_equal(test, [8, 9]) # Test with additional test_size splits = TimeSeriesSplit(n_splits=2, gap=2, test_size=3).split(X) train, test = next(splits) assert_array_equal(train, [0, 1]) assert_array_equal(test, [4, 5, 6]) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3, 4]) assert_array_equal(test, [7, 8, 9]) # Verify proper error is thrown with pytest.raises(ValueError, match="Too many splits.*and gap"): splits = TimeSeriesSplit(n_splits=4, gap=2).split(X) next(splits) def test_nested_cv(): # Test if nested cross validation works with different combinations of cv rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 5, 15) cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(n_splits=3), StratifiedKFold(), StratifiedShuffleSplit(n_splits=3, random_state=0)] for inner_cv, outer_cv in combinations_with_replacement(cvs, 2): gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]}, cv=inner_cv, error_score='raise') cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv, fit_params={'groups': groups}) def test_build_repr(): class MockSplitter: def __init__(self, a, b=0, c=None): self.a = a self.b = b self.c = c def __repr__(self): return _build_repr(self) assert repr(MockSplitter(5, 6)) == "MockSplitter(a=5, b=6, c=None)" @pytest.mark.parametrize('CVSplitter', (ShuffleSplit, GroupShuffleSplit, StratifiedShuffleSplit)) def test_shuffle_split_empty_trainset(CVSplitter): cv = CVSplitter(test_size=.99) X, y = [[1]], [0] # 1 sample with pytest.raises( ValueError, match='With n_samples=1, test_size=0.99 and train_size=None, ' 'the resulting train set will be empty'): next(cv.split(X, y, groups=[1])) def test_train_test_split_empty_trainset(): X, = [[1]] # 1 sample with pytest.raises( ValueError, match='With n_samples=1, test_size=0.99 and train_size=None, ' 'the resulting train set will be empty'): train_test_split(X, test_size=.99) X = [[1], [1], [1]] # 3 samples, ask for more than 2 thirds with pytest.raises( ValueError, match='With n_samples=3, test_size=0.67 and train_size=None, ' 'the resulting train set will be empty'): train_test_split(X, test_size=.67) def test_leave_one_out_empty_trainset(): # LeaveOneGroup out expect at least 2 groups so no need to check cv = LeaveOneOut() X, y = [[1]], [0] # 1 sample with pytest.raises( ValueError, match='Cannot perform LeaveOneOut with n_samples=1'): next(cv.split(X, y)) def test_leave_p_out_empty_trainset(): # No need to check LeavePGroupsOut cv = LeavePOut(p=2) X, y = [[1], [2]], [0, 3] # 2 samples with pytest.raises( ValueError, match='p=2 must be strictly less than the number of samples=2'): next(cv.split(X, y, groups=[1, 2])) @pytest.mark.parametrize('Klass', (KFold, StratifiedKFold)) def test_random_state_shuffle_false(Klass): # passing a non-default random_state when shuffle=False makes no sense with pytest.raises(ValueError, match='has no effect since shuffle is False'): Klass(3, shuffle=False, random_state=0) 
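# --- Editor's illustrative sketch (added for clarity; not part of the --------
# original test module, and not collected by pytest). The parametrisation
# below encodes when split() is constant across calls: shuffling disabled, or
# shuffling with an integer random_state. With random_state=None or a
# RandomState instance, repeated calls typically reshuffle. `_demo` names are
# hypothetical additions.
def _demo_int_seed_gives_constant_splits():
    X_demo = np.arange(20).reshape(10, 2)
    cv_fixed = KFold(5, shuffle=True, random_state=0)
    first = [tuple(test) for _, test in cv_fixed.split(X_demo)]
    second = [tuple(test) for _, test in cv_fixed.split(X_demo)]
    assert first == second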
@pytest.mark.parametrize('cv, expected', [ (KFold(), True), (KFold(shuffle=True, random_state=123), True), (StratifiedKFold(), True), (StratifiedKFold(shuffle=True, random_state=123), True), (RepeatedKFold(random_state=123), True), (RepeatedStratifiedKFold(random_state=123), True), (ShuffleSplit(random_state=123), True), (GroupShuffleSplit(random_state=123), True), (StratifiedShuffleSplit(random_state=123), True), (GroupKFold(), True), (TimeSeriesSplit(), True), (LeaveOneOut(), True), (LeaveOneGroupOut(), True), (LeavePGroupsOut(n_groups=2), True), (LeavePOut(p=2), True), (KFold(shuffle=True, random_state=None), False), (KFold(shuffle=True, random_state=None), False), (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False), (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False), (RepeatedKFold(random_state=None), False), (RepeatedKFold(random_state=np.random.RandomState(0)), False), (RepeatedStratifiedKFold(random_state=None), False), (RepeatedStratifiedKFold(random_state=np.random.RandomState(0)), False), (ShuffleSplit(random_state=None), False), (ShuffleSplit(random_state=np.random.RandomState(0)), False), (GroupShuffleSplit(random_state=None), False), (GroupShuffleSplit(random_state=np.random.RandomState(0)), False), (StratifiedShuffleSplit(random_state=None), False), (StratifiedShuffleSplit(random_state=np.random.RandomState(0)), False), ]) def test_yields_constant_splits(cv, expected): assert _yields_constant_splits(cv) == expected
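# --- Editor's note (illustrative sketch appended by the editor; not part of --
# the original test module, and not collected by pytest). The splitters tested
# above are usually consumed through helpers such as cross_val_score; the
# estimator and dataset sizes below are arbitrary choices for demonstration.
def _demo_cross_val_score_usage():
    from sklearn.linear_model import LogisticRegression
    X_demo, y_demo = make_classification(n_samples=200, n_features=10,
                                         random_state=0)
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
    scores = cross_val_score(LogisticRegression(max_iter=1000),
                             X_demo, y_demo, cv=cv)
    # one score per fold
    assert len(scores) == cv.get_n_splits()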
bsd-3-clause
yuval-harpaz/MNE4D
pyScripts/Noam_mneTest.py
1
15944
__author__ = 'noam' import mne import matplotlib.pyplot as plt import numpy as np import os import sys from mne.viz import plot_topo from mne.minimum_norm import (make_inverse_operator, apply_inverse, write_inverse_operator, read_inverse_operator) from mne.source_estimate import read_source_estimate from mne.commands import mne_bti2fiff import os.path as op import numpy as np from numpy.random import randn from scipy import stats as stats import mne from mne import (io, spatial_tris_connectivity, compute_morph_matrix, grade_to_tris) from mne.epochs import equalize_epoch_counts from mne.stats import (spatio_temporal_cluster_1samp_test, summarize_clusters_stc) from mne.viz import mne_analyze_colormap try: from mne.time_frequency import induced_power except: print('no induced_power module') def getFileName(fname): return os.path.join(SUBJECT_FOLDER, '{}-{}.fif'.format(SUBJECT, fname)) SUBJECTS_DIR = '/home/noam/Documents/MEGdata/centipede/mne' SUBJECT = 'Idan' SUBJECT_FOLDER = os.path.join(SUBJECTS_DIR, SUBJECT) RAW_4D = os.path.join(SUBJECT_FOLDER, 'rs,hb,lf_c,rfhp0.1Hz') RAW = getFileName('raw') EVO = getFileName('ave') COV = getFileName('cov') EPO = getFileName('epo') FWD = getFileName('fwd') INV = getFileName('inv') # This file was created using # mne_watershed_bem --atlas # mne_setup_mri --overwrite # Then, open mne_analyze and follow these instructions: # http://martinos.org/mne/stable/manual/sampledata.html#chdijbig MRI = os.path.join(SUBJECT_FOLDER, 'mri', 'transforms', 'Idan-trans.fif') # The following files were created using created using: # mne_watershed_bem --atlas --overwrite # Them, copy the surface files from bem/watershed, and rename them # like outer_skull_surface -> outer_skull.surf # Then setup the forward model # mne_setup_forward_model --homog --surf --ico 4 --overwrite SRC = os.path.join(SUBJECT_FOLDER, 'bem', 'Idan-src.fif') BEM = os.path.join(SUBJECT_FOLDER, 'bem', 'Idan-5120-bem-sol.fif') # Source estimate STC = os.path.join(SUBJECT_FOLDER, 'mne_dSPM_inverse_MEG') def convertToFIF(): sys.argv = ['', '-p', RAW_4D, '-o', RAW] mne_bti2fiff.run() def loadRaw(): # read the data raw = mne.io.Raw(RAW, preload=True) print(raw) return raw def filter(raw): raw.filter(l_freq=1.0, h_freq=50.0) raw.save(RAW, overwrite=True) def calcNoiseCov(raw=None): if (raw is None): raw = mne.io.Raw(RAW, preload=True) picks = mne.pick_types(raw.info, meg=True) events = mne.find_events(raw, stim_channel='STI 014') epoches = [] for tmin, tmax in zip(np.arange(0, 3.5, 0.5), np.arange(0.5, 4, 0.5)): print((tmin, tmax)) epoches.append(findEpoches(raw, picks, events, dict(onset=20), tmin=tmin, tmax=tmax, baseline=(None, None))) noiseCov = mne.compute_covariance([epo['onset'] for epo in epoches], tmax=None) # regularize noise covariance # noiseCov = mne.cov.regularize(noiseCov, evoked.info, # mag=0.05, proj=True) # grad=0.05, eeg=0.1 noiseCov.save(COV) allEpoches = findEpoches(raw, picks, events, dict(onset=20), tmin=0, tmax=3.5) evoked = allEpoches['onset'].average() evoked.save(EVO) def calcEvoked(raw): picks = mne.pick_types(raw.info, meg=True) events = mne.find_events(raw, stim_channel='STI 014') print(events) epochs = findEpoches(raw, picks, events, dict(stay=60, leave=40), tmin=-3.0, tmax=0) # compute evoked response and noise covariance,and plot evoked epochs.save(EPO) evokedStay = epochs['stay'].average() evokedLeave = epochs['leave'].average() contrast = evokedStay - evokedLeave covStay = mne.compute_covariance(epochs['stay'], tmax=0) covLeave = mne.compute_covariance(epochs['leave'], 
tmax=0) covStay.save(getFileName('stay-cov')) covLeave.save(getFileName('leave-cov')) # evokedStay.plot() # evokedLeave.plot() # contrast.plot() def plotEvokedDiff(): epochs = loadEpoches() evokeds = [epochs[name].average() for name in 'stay', 'leave'] colors = 'yellow', 'green' title = 'Stay vs Leave' plot_topo(evokeds, color=colors, title=title) conditions = [e.comment for e in evokeds] for cond, col, pos in zip(conditions, colors, (0.025, 0.07)): plt.figtext(0.775, pos, cond, color=col, fontsize=12) plt.show() def calcInducedPower(raw): n_cycles = 2 # number of cycles in Morlet wavelet frequencies = np.arange(1, 50, 1) # frequencies of interest Fs = raw.info['sfreq'] # sampling in Hz epoches = loadEpoches() epochesStay = getEpochesData('stay') power, phase_lock = induced_power(epochesStay, Fs=Fs, frequencies=frequencies, n_cycles=2, n_jobs=1) def plotLeadField(fwd=None): if (fwd is None): fwd = mne.read_forward_solution(FWD) leadfield = fwd['sol']['data'] print("Leadfield size : %d x %d" % leadfield.shape) magMap = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed') picks = mne.pick_types(fwd['info'], meg=True, eeg=False) fig, ax = plt.subplots(1, 1, figsize=(10, 8), sharex=True) fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14) im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto') ax.set_title('MEG') ax.set_xlabel('sources') ax.set_ylabel('sensors') plt.colorbar(im, ax=ax, cmap='RdBu_r') plt.show() plotOrientationSensitivity(magMap) plotMagSensitivity(magMap) def plotOrientationSensitivity(magMap=None): if (magMap is None): fwd = mne.read_forward_solution(FWD) magMap = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed') plt.figure() plt.hist(magMap.data.ravel(), bins=20, label='Magnetometers') plt.legend() plt.title('Normal orientation sensitivity') plt.xlabel('sensitivity') plt.ylabel('count') plt.show() def plotMagSensitivity(magMap=None): if (magMap is None): fwd = mne.read_forward_solution(FWD) magMap = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed') args = dict(fmin=0.1, fmid=0.5, fmax=0.9, smoothing_steps=7) magMap.plot(subject=SUBJECT, time_label='Sensitivity', subjects_dir=SUBJECTS_DIR, **args) def findEpoches(raw, picks, events, event_id, tmin, tmax, baseline=(None, 0)): return mne.Epochs(raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax, proj=True, picks=picks, baseline=baseline, preload=True, reject=None) # reject can be dict(mag=4e-12) def makeForwardSolution(n_jobs=1, usingEEG=False, outputFileName='fwd'): fwd = mne.make_forward_solution(RAW, mri=MRI, src=SRC, bem=BEM, fname=FWD, meg=True, eeg=usingEEG, mindist=5.0, n_jobs=n_jobs, overwrite=True) # convert to surface orientation for better visualization fwd = mne.convert_forward_solution(fwd, surf_ori=True) return fwd def makeInverseOperator(): # http://martinos.org/mne/stable/auto_examples/inverse/plot_make_inverse_operator.html snr = 3.0 lambda2 = 1.0 / snr ** 2 # Load data evoked = mne.read_evokeds(EVO, condition=0, baseline=(None, 0)) forward_meg = mne.read_forward_solution(FWD, surf_ori=True) noise_cov = mne.read_cov(COV) # regularize noise covariance noise_cov = mne.cov.regularize(noise_cov, evoked.info, mag=0.05, proj=True) # grad=0.05, eeg=0.1 # Restrict forward solution as necessary for MEG forward_meg = mne.pick_types_forward(forward_meg, meg=True, eeg=False) # make an M/EEG, MEG-only, and EEG-only inverse operators info = evoked.info inverse_operator_meg = make_inverse_operator(info, forward_meg, noise_cov, loose=0.2, depth=0.8) write_inverse_operator(INV, 
inverse_operator_meg) # Compute inverse solution stcs_meg = apply_inverse(evoked, inverse_operator_meg, lambda2, "dSPM", pick_ori=None) # Save result in stc files stcs_meg.save(STC) plotActivationTS(stcs_meg) def plotActivationTS(stcs_meg): plt.close('all') plt.figure(figsize=(8, 6)) name = 'MEG' stc = stcs_meg plt.plot(1e3 * stc.times, stc.data[::150, :].T) plt.ylabel('%s\ndSPM value' % str.upper(name)) plt.xlabel('time (ms)') plt.show() def plot3DActivity(stc=None): if (stc is None): stc = read_source_estimate(STC) # Plot brain in 3D with PySurfer if available. Note that the subject name # is already known by the SourceEstimate stc object. brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=SUBJECTS_DIR, subject=SUBJECT) brain.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True) brain.show_view('lateral') # use peak getter to move vizualization to the time point of the peak vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True) brain.set_data_time_index(time_idx) # draw marker at maximum peaking vertex brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue', scale_factor=0.6) brain.save_image(getPngName('dSPM_map')) def permuationTest(): # http://martinos.org/mne/stable/auto_examples/stats/plot_cluster_stats_spatio_temporal.html#example-stats-plot-cluster-stats-spatio-temporal-py epoches = loadEpoches() epo1 = epoches['stay'] epo2 = epoches['leave'] # Equalize trial counts to eliminate bias (which would otherwise be # introduced by the abs() performed below) equalize_epoch_counts([epo1, epo2]) ############################################################################### # Transform to source space snr = 3.0 lambda2 = 1.0 / snr ** 2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) inverse_operator = read_inverse_operator(INV) sample_vertices = [s['vertno'] for s in inverse_operator['src']] # Let's average and compute inverse, resampling to speed things up evoked1 = epo1.average() evoked1.resample(50) condition1 = apply_inverse(evoked1, inverse_operator, lambda2, method) evoked2 = epo2.average() evoked2.resample(50) condition2 = apply_inverse(evoked2, inverse_operator, lambda2, method) # Let's only deal with t > 0, cropping to reduce multiple comparisons # condition1.crop(0, None) # condition2.crop(0, None) tmin = condition1.tmin tstep = condition1.tstep ############################################################################### # Transform to common cortical space # Normally you would read in estimates across several subjects and morph # them to the same cortical space (e.g. fsaverage). For example purposes, # we will simulate this by just having each "subject" have the same # response (just noisy in source space) here. Note that for 7 subjects # with a two-sided statistical test, the minimum significance under a # permutation test is only p = 1/(2 ** 6) = 0.015, which is large. n_vertices_sample, n_times = condition1.data.shape n_subjects = 7 print('Simulating data for %d subjects.' % n_subjects) # Let's make sure our results replicate, so set the seed. np.random.seed(0) X = randn(n_vertices_sample, n_times, n_subjects, 2) * 10 X[:, :, :, 0] += condition1.data[:, :, np.newaxis] X[:, :, :, 1] += condition2.data[:, :, np.newaxis] # It's a good idea to spatially smooth the data, and for visualization # purposes, let's morph these to fsaverage, which is a grade 5 source space # with vertices 0:10242 for each hemisphere. 
Usually you'd have to morph # each subject's data separately (and you might want to use morph_data # instead), but here since all estimates are on 'sample' we can use one # morph matrix for all the heavy lifting. fsave_vertices = [np.arange(10242), np.arange(10242)] morph_mat = compute_morph_matrix('Idan', 'fsaverage', sample_vertices, fsave_vertices, 20, SUBJECTS_DIR, ) n_vertices_fsave = morph_mat.shape[0] # We have to change the shape for the dot() to work properly X = X.reshape(n_vertices_sample, n_times * n_subjects * 2) print('Morphing data.') X = morph_mat.dot(X) # morph_mat is a sparse matrix X = X.reshape(n_vertices_fsave, n_times, n_subjects, 2) # Finally, we want to compare the overall activity levels in each condition, # the diff is taken along the last axis (condition). The negative sign makes # it so condition1 > condition2 shows up as "red blobs" (instead of blue). X = np.abs(X) # only magnitude X = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast ############################################################################### # Compute statistic # To use an algorithm optimized for spatio-temporal clustering, we # just pass the spatial connectivity matrix (instead of spatio-temporal) print('Computing connectivity.') connectivity = spatial_tris_connectivity(grade_to_tris(5)) # Note that X needs to be a multi-dimensional array of shape # samples (subjects) x time x space, so we permute dimensions X = np.transpose(X, [2, 1, 0]) # Now let's actually do the clustering. This can take a long time... # Here we set the threshold quite high to reduce computation. p_threshold = 0.001 t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1) print('Clustering.') T_obs, clusters, cluster_p_values, H0 = clu = \ spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=2, threshold=t_threshold) # Now select the clusters that are sig. at p < 0.05 (note that this value # is multiple-comparisons corrected). 
good_cluster_inds = np.where(cluster_p_values < 0.05)[0] ############################################################################### # Visualize the clusters print('Visualizing clusters.') # Now let's build a convenient representation of each cluster, where each # cluster becomes a "time point" in the SourceEstimate stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep, vertno=fsave_vertices, subject='fsaverage') # Let's actually plot the first "time point" in the SourceEstimate, which # shows all the clusters, weighted by duration colormap = mne_analyze_colormap(limits=[0, 10, 50]) # blue blobs are for condition A < condition B, red for A > B brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both', colormap, subjects_dir=SUBJECTS_DIR, time_label='Duration significant (ms)') brain.set_data_time_index(0) # The colormap requires brain data to be scaled -fmax -> fmax brain.scale_data_colormap(fmin=-50, fmid=0, fmax=50, transparent=False) brain.show_view('lateral') brain.save_image('clusters.png') def loadEpoches(epochesName='epo'): return mne.read_epochs(getFileName(epochesName)) def loadEvoked(evokedName, condition): return mne.read_evokeds(getFileName(evokedName), condition=condition, baseline=(None, 0), proj=True) def getEpochesData(epochs, condName): return epochs[condName].get_data() def readInverseOp(invOpFileName): inverse_operator = read_inverse_operator(invOpFileName) return inverse_operator def getPngName(fname): return os.path.join(SUBJECT_FOLDER, '{}.png'.format(fname)) if __name__ == '__main__': # raw = loadRaw() # filter(raw) # calcNoiseCov() # plotEvokedDiff() # epoches = loadEpoches() # makeForwardSolution(n_jobs=4) # plotLeadField() # plotMagSensitivity() # makeInverseOperator() # plot3DActivity() permuationTest() print('finish!')
gpl-2.0
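The permutation-test section of the script above derives its cluster-forming threshold from a p-value and the number of subjects. The snippet below is a standalone sketch of just that conversion; `n_subjects = 7` and `p_threshold = 0.001` mirror the script, and nothing else from the MEG pipeline is required.

from scipy import stats

# Values mirroring the script above; purely illustrative.
n_subjects = 7
p_threshold = 0.001

# Two-sided test with n_subjects - 1 degrees of freedom. ppf returns the
# lower-tail quantile, so the sign is flipped to obtain a positive threshold.
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
print('cluster-forming t threshold: %.3f' % t_threshold)  # roughly 5.96 here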
zblasingame/EE416
line_scan/machine_learning/model.py
1
2031
"""Implementation of Linear Regression in Numpy Author: Zander Blasingame Institution: Clarkson University Lab: CAMEL """ import numpy as np from sklearn.metrics import confusion_matrix import datasets as ds class LR: """Linear Regression for Anomaly Detection Args: num_features (int): Number of features for the classifier. normalize (str = 'rescaling'): Normalization mode from the set {'rescaling', 'vector_norm', 'none'} reg_param (float = 1e-8): Regularization parameter. """ def __init__(self, num_features, **kwargs): defaults = { 'normalize': 'rescaling', 'reg_param': 1e-8 } self.num_features = num_features vars(self).update({p: kwargs.get(p, d) for p, d in defaults.items()}) def train(self, X, Y): """Train LR model given benign samples. Args: X (np.array): Training matrix of shape (samples, features) """ X = X.astype(np.float64) # normalize X if self.normalize == 'rescaling': self.min = X.min(axis=0) self.max = X.max(axis=0) X = ds.rescale(X, self.min, self.max, 0, 1) # add bias vector to all samples X = np.concatenate((X, np.ones((X.shape[0], 1))), axis=1) xTx = X.T.dot(X) inv = np.linalg.inv(xTx + self.reg_param * np.eye(self.num_features+1)) self.theta = inv.dot(X.T).dot(Y) return self.theta def test(self, X, Y): """Evaluate model performance. Args: X (np.array): Testing features. Y (np.array): Testing labels. """ X = X.astype(np.float64) # normalize X if self.normalize == 'rescaling': X = ds.rescale(X, self.min, self.max, 0, 1) # add bias vector to all samples X = np.concatenate((X, np.ones((X.shape[0], 1))), axis=1) scores = np.dot(self.theta, X.T) return np.mean(scores - Y)
gpl-3.0
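A hypothetical usage sketch for the `LR` class above, on synthetic data. `normalize='none'` is chosen so the example does not need the local `datasets` module, and the import path assumes the file is saved as `model.py` as shown in the path field.

import numpy as np
from model import LR  # assumed module name, matching the path above

rng = np.random.RandomState(0)
X = rng.rand(200, 3)
true_theta = np.array([2.0, -1.0, 0.5])
Y = X.dot(true_theta) + 3.0 + 0.01 * rng.randn(200)

clf = LR(num_features=3, normalize='none', reg_param=1e-8)
theta = clf.train(X, Y)   # closed-form ridge solution; bias term is last
print(theta)              # expected to be close to [2.0, -1.0, 0.5, 3.0]
print(clf.test(X, Y))     # mean residual, near zero on the training data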
andyraib/data-storage
python_scripts/env/lib/python3.6/site-packages/matplotlib/delaunay/testfuncs.py
21
21168
"""Some test functions for bivariate interpolation. Most of these have been yoinked from ACM TOMS 792. http://netlib.org/toms/792 """ from __future__ import (absolute_import, division, print_function, unicode_literals) import six from six.moves import xrange import numpy as np from .triangulate import Triangulation class TestData(dict): def __init__(self, *args, **kwds): dict.__init__(self, *args, **kwds) self.__dict__ = self class TestDataSet(object): def __init__(self, **kwds): self.__dict__.update(kwds) data = TestData( franke100=TestDataSet( x=np.array([0.0227035, 0.0539888, 0.0217008, 0.0175129, 0.0019029, -0.0509685, 0.0395408, -0.0487061, 0.0315828, -0.0418785, 0.1324189, 0.1090271, 0.1254439, 0.093454, 0.0767578, 0.1451874, 0.0626494, 0.1452734, 0.0958668, 0.0695559, 0.2645602, 0.2391645, 0.208899, 0.2767329, 0.1714726, 0.2266781, 0.1909212, 0.1867647, 0.2304634, 0.2426219, 0.3663168, 0.3857662, 0.3832392, 0.3179087, 0.3466321, 0.3776591, 0.3873159, 0.3812917, 0.3795364, 0.2803515, 0.4149771, 0.4277679, 0.420001, 0.4663631, 0.4855658, 0.4092026, 0.4792578, 0.4812279, 0.3977761, 0.4027321, 0.5848691, 0.5730076, 0.6063893, 0.5013894, 0.5741311, 0.6106955, 0.5990105, 0.5380621, 0.6096967, 0.5026188, 0.6616928, 0.6427836, 0.6396475, 0.6703963, 0.7001181, 0.633359, 0.6908947, 0.6895638, 0.6718889, 0.6837675, 0.7736939, 0.7635332, 0.7410424, 0.8258981, 0.7306034, 0.8086609, 0.8214531, 0.729064, 0.8076643, 0.8170951, 0.8424572, 0.8684053, 0.8366923, 0.9418461, 0.8478122, 0.8599583, 0.91757, 0.8596328, 0.9279871, 0.8512805, 1.044982, 0.9670631, 0.9857884, 0.9676313, 1.0129299, 0.965704, 1.0019855, 1.0359297, 1.0414677, 0.9471506]), y=np.array([-0.0310206, 0.1586742, 0.2576924, 0.3414014, 0.4943596, 0.5782854, 0.6993418, 0.7470194, 0.9107649, 0.996289, 0.050133, 0.0918555, 0.2592973, 0.3381592, 0.4171125, 0.5615563, 0.6552235, 0.7524066, 0.9146523, 0.9632421, 0.0292939, 0.0602303, 0.2668783, 0.3696044, 0.4801738, 0.5940595, 0.6878797, 0.8185576, 0.9046507, 0.9805412, 0.0396955, 0.0684484, 0.2389548, 0.3124129, 0.4902989, 0.5199303, 0.6445227, 0.8203789, 0.8938079, 0.9711719, -0.0284618, 0.1560965, 0.2262471, 0.3175094, 0.3891417, 0.5084949, 0.6324247, 0.7511007, 0.8489712, 0.9978728, -0.0271948, 0.127243, 0.2709269, 0.3477728, 0.4259422, 0.6084711, 0.6733781, 0.7235242, 0.9242411, 1.0308762, 0.0255959, 0.0707835, 0.2008336, 0.3259843, 0.4890704, 0.5096324, 0.669788, 0.7759569, 0.9366096, 1.0064516, 0.0285374, 0.1021403, 0.1936581, 0.3235775, 0.4714228, 0.6091595, 0.6685053, 0.8022808, 0.847679, 1.0512371, 0.0380499, 0.0902048, 0.2083092, 0.3318491, 0.4335632, 0.5910139, 0.6307383, 0.8144841, 0.904231, 0.969603, -0.01209, 0.1334114, 0.2695844, 0.3795281, 0.4396054, 0.5044425, 0.6941519, 0.7459923, 0.8682081, 0.9801409])), franke33=TestDataSet( x=np.array([5.00000000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e-01, 1.00000000e-01, 1.50000000e-01, 2.00000000e-01, 2.50000000e-01, 3.00000000e-01, 3.50000000e-01, 5.00000000e-01, 5.00000000e-01, 5.50000000e-01, 6.00000000e-01, 6.00000000e-01, 6.00000000e-01, 6.50000000e-01, 7.00000000e-01, 7.00000000e-01, 7.00000000e-01, 7.50000000e-01, 7.50000000e-01, 7.50000000e-01, 8.00000000e-01, 8.00000000e-01, 8.50000000e-01, 9.00000000e-01, 9.00000000e-01, 9.50000000e-01, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00]), y=np.array([4.50000000e-01, 5.00000000e-01, 1.00000000e+00, 0.00000000e+00, 1.50000000e-01, 7.50000000e-01, 3.00000000e-01, 1.00000000e-01, 2.00000000e-01, 3.50000000e-01, 8.50000000e-01, 0.00000000e+00, 1.00000000e+00, 
9.50000000e-01, 2.50000000e-01, 6.50000000e-01, 8.50000000e-01, 7.00000000e-01, 2.00000000e-01, 6.50000000e-01, 9.00000000e-01, 1.00000000e-01, 3.50000000e-01, 8.50000000e-01, 4.00000000e-01, 6.50000000e-01, 2.50000000e-01, 3.50000000e-01, 8.00000000e-01, 9.00000000e-01, 0.00000000e+00, 5.00000000e-01, 1.00000000e+00])), lawson25=TestDataSet( x=np.array([0.1375, 0.9125, 0.7125, 0.225, -0.05, 0.475, 0.05, 0.45, 1.0875, 0.5375, -0.0375, 0.1875, 0.7125, 0.85, 0.7, 0.275, 0.45, 0.8125, 0.45, 1., 0.5, 0.1875, 0.5875, 1.05, 0.1]), y=np.array([0.975, 0.9875, 0.7625, 0.8375, 0.4125, 0.6375, -0.05, 1.0375, 0.55, 0.8, 0.75, 0.575, 0.55, 0.4375, 0.3125, 0.425, 0.2875, 0.1875, -0.0375, 0.2625, 0.4625, 0.2625, 0.125, -0.06125, 0.1125])), random100=TestDataSet( x=np.array([0.0096326, 0.0216348, 0.029836, 0.0417447, 0.0470462, 0.0562965, 0.0646857, 0.0740377, 0.0873907, 0.0934832, 0.1032216, 0.1110176, 0.1181193, 0.1251704, 0.132733, 0.1439536, 0.1564861, 0.1651043, 0.1786039, 0.1886405, 0.2016706, 0.2099886, 0.2147003, 0.2204141, 0.2343715, 0.240966, 0.252774, 0.2570839, 0.2733365, 0.2853833, 0.2901755, 0.2964854, 0.3019725, 0.3125695, 0.3307163, 0.3378504, 0.3439061, 0.3529922, 0.3635507, 0.3766172, 0.3822429, 0.3869838, 0.3973137, 0.4170708, 0.4255588, 0.4299218, 0.4372839, 0.4705033, 0.4736655, 0.4879299, 0.494026, 0.5055324, 0.5162593, 0.5219219, 0.5348529, 0.5483213, 0.5569571, 0.5638611, 0.5784908, 0.586395, 0.5929148, 0.5987839, 0.6117561, 0.6252296, 0.6331381, 0.6399048, 0.6488972, 0.6558537, 0.6677405, 0.6814074, 0.6887812, 0.6940896, 0.7061687, 0.7160957, 0.7317445, 0.7370798, 0.746203, 0.7566957, 0.7699998, 0.7879347, 0.7944014, 0.8164468, 0.8192794, 0.8368405, 0.8500993, 0.8588255, 0.8646496, 0.8792329, 0.8837536, 0.8900077, 0.8969894, 0.9044917, 0.9083947, 0.9203972, 0.9347906, 0.9434519, 0.9490328, 0.9569571, 0.9772067, 0.9983493]), y=np.array([0.3083158, 0.2450434, 0.8613847, 0.0977864, 0.3648355, 0.7156339, 0.5311312, 0.9755672, 0.1781117, 0.5452797, 0.1603881, 0.7837139, 0.9982015, 0.6910589, 0.104958, 0.8184662, 0.7086405, 0.4456593, 0.1178342, 0.3189021, 0.9668446, 0.7571834, 0.2016598, 0.3232444, 0.4368583, 0.8907869, 0.064726, 0.5692618, 0.2947027, 0.4332426, 0.3347464, 0.7436284, 0.1066265, 0.8845357, 0.515873, 0.9425637, 0.4799701, 0.1783069, 0.114676, 0.8225797, 0.2270688, 0.4073598, 0.887508, 0.7631616, 0.9972804, 0.4959884, 0.3410421, 0.249812, 0.6409007, 0.105869, 0.5411969, 0.0089792, 0.8784268, 0.5515874, 0.4038952, 0.1654023, 0.2965158, 0.3660356, 0.0366554, 0.950242, 0.2638101, 0.9277386, 0.5377694, 0.7374676, 0.4674627, 0.9186109, 0.0416884, 0.1291029, 0.6763676, 0.8444238, 0.3273328, 0.1893879, 0.0645923, 0.0180147, 0.8904992, 0.4160648, 0.4688995, 0.2174508, 0.5734231, 0.8853319, 0.8018436, 0.6388941, 0.8931002, 0.1000558, 0.2789506, 0.9082948, 0.3259159, 0.8318747, 0.0508513, 0.970845, 0.5120548, 0.2859716, 0.9581641, 0.6183429, 0.3779934, 0.4010423, 0.9478657, 0.7425486, 0.8883287, 0.549675])), uniform9=TestDataSet( x=np.array([1.25000000e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.25000000e-01, 1.25000000e-01, 1.25000000e-01, 1.25000000e-01, 1.25000000e-01, 1.25000000e-01, 1.25000000e-01, 1.25000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 3.75000000e-01, 3.75000000e-01, 3.75000000e-01, 3.75000000e-01, 3.75000000e-01, 3.75000000e-01, 3.75000000e-01, 
3.75000000e-01, 3.75000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 6.25000000e-01, 6.25000000e-01, 6.25000000e-01, 6.25000000e-01, 6.25000000e-01, 6.25000000e-01, 6.25000000e-01, 6.25000000e-01, 6.25000000e-01, 7.50000000e-01, 7.50000000e-01, 7.50000000e-01, 7.50000000e-01, 7.50000000e-01, 7.50000000e-01, 7.50000000e-01, 7.50000000e-01, 7.50000000e-01, 8.75000000e-01, 8.75000000e-01, 8.75000000e-01, 8.75000000e-01, 8.75000000e-01, 8.75000000e-01, 8.75000000e-01, 8.75000000e-01, 8.75000000e-01, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00]), y=np.array([0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00, 0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00, 0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00, 0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00, 0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00, 0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00, 0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00, 0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00, 0.00000000e+00, 1.25000000e-01, 2.50000000e-01, 3.75000000e-01, 5.00000000e-01, 6.25000000e-01, 7.50000000e-01, 8.75000000e-01, 1.00000000e+00])), ) def constant(x, y): return np.ones(x.shape, x.dtype) constant.title = 'Constant' def xramp(x, y): return x xramp.title = 'X Ramp' def yramp(x, y): return y yramp.title = 'Y Ramp' def exponential(x, y): x = x * 9 y = y * 9 x1 = x + 1.0 x2 = x - 2.0 x4 = x - 4.0 x7 = x - 7.0 y1 = x + 1.0 y2 = y - 2.0 y3 = y - 3.0 y7 = y - 7.0 f = (0.75 * np.exp(-(x2 * x2 + y2 * y2) / 4.0) + 0.75 * np.exp(-x1 * x1 / 49.0 - y1 / 10.0) + 0.5 * np.exp(-(x7 * x7 + y3 * y3) / 4.0) - 0.2 * np.exp(-x4 * x4 - y7 * y7)) return f exponential.title = 'Exponential and Some Gaussians' def cliff(x, y): f = np.tanh(9.0 * (y - x) + 1.0) / 9.0 return f cliff.title = 'Cliff' def saddle(x, y): f = (1.25 + np.cos(5.4 * y)) / (6.0 + 6.0 * (3 * x - 1.0) ** 2) return f saddle.title = 'Saddle' def gentle(x, y): f = np.exp(-5.0625 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)) / 3.0 return f gentle.title = 'Gentle Peak' def steep(x, y): f = np.exp(-20.25 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)) / 3.0 return f steep.title = 'Steep Peak' def sphere(x, y): circle = 64 - 81 * ((x - 0.5) ** 2 + (y - 0.5) ** 2) f = np.where(circle >= 0, np.sqrt(np.clip(circle, 0, 100)) - 0.5, 0.0) return f sphere.title = 'Sphere' def trig(x, y): f = 2.0 * np.cos(10.0 * x) * np.sin(10.0 * y) + np.sin(10.0 * x * y) return f trig.title = 'Cosines and Sines' def gauss(x, y): x = 5.0 - 10.0 * x y = 5.0 - 10.0 * y g1 = np.exp(-x * x / 2) g2 = np.exp(-y * y / 2) f = g1 + 0.75 * g2 * (1 + g1) return f gauss.title = 'Gaussian Peak and Gaussian Ridges' 
def cloverleaf(x, y): ex = np.exp((10.0 - 20.0 * x) / 3.0) ey = np.exp((10.0 - 20.0 * y) / 3.0) logitx = 1.0 / (1.0 + ex) logity = 1.0 / (1.0 + ey) f = (((20.0 / 3.0) ** 3 * ex * ey) ** 2 * (logitx * logity) ** 5 * (ex - 2.0 * logitx) * (ey - 2.0 * logity)) return f cloverleaf.title = 'Cloverleaf' def cosine_peak(x, y): circle = np.hypot(80 * x - 40.0, 90 * y - 45.) f = np.exp(-0.04 * circle) * np.cos(0.15 * circle) return f cosine_peak.title = 'Cosine Peak' allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak] class LinearTester(object): name = 'Linear' def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250): self.xrange = xrange self.yrange = yrange self.nrange = nrange self.npoints = npoints rng = np.random.RandomState(1234567890) self.x = rng.uniform(xrange[0], xrange[1], size=npoints) self.y = rng.uniform(yrange[0], yrange[1], size=npoints) self.tri = Triangulation(self.x, self.y) def replace_data(self, dataset): self.x = dataset.x self.y = dataset.y self.tri = Triangulation(self.x, self.y) def interpolator(self, func): z = func(self.x, self.y) return self.tri.linear_extrapolator(z, bbox=self.xrange + self.yrange) def plot(self, func, interp=True, plotter='imshow'): import matplotlib as mpl from matplotlib import pylab as pl if interp: lpi = self.interpolator(func) z = lpi[self.yrange[0]:self.yrange[1]:complex(0, self.nrange), self.xrange[0]:self.xrange[1]:complex(0, self.nrange)] else: y, x = np.mgrid[ self.yrange[0]:self.yrange[1]:complex(0, self.nrange), self.xrange[0]:self.xrange[1]:complex(0, self.nrange)] z = func(x, y) z = np.where(np.isinf(z), 0.0, z) extent = (self.xrange[0], self.xrange[1], self.yrange[0], self.yrange[1]) pl.ioff() pl.clf() pl.hot() # Some like it hot if plotter == 'imshow': pl.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower') elif plotter == 'contour': Y, X = np.ogrid[ self.yrange[0]:self.yrange[1]:complex(0, self.nrange), self.xrange[0]:self.xrange[1]:complex(0, self.nrange)] pl.contour(np.ravel(X), np.ravel(Y), z, 20) x = self.x y = self.y lc = mpl.collections.LineCollection( np.array([((x[i], y[i]), (x[j], y[j])) for i, j in self.tri.edge_db]), colors=[(0, 0, 0, 0.2)]) ax = pl.gca() ax.add_collection(lc) if interp: title = '%s Interpolant' % self.name else: title = 'Reference' if hasattr(func, 'title'): pl.title('%s: %s' % (func.title, title)) else: pl.title(title) pl.show() pl.ion() class NNTester(LinearTester): name = 'Natural Neighbors' def interpolator(self, func): z = func(self.x, self.y) return self.tri.nn_extrapolator(z, bbox=self.xrange + self.yrange) def plotallfuncs(allfuncs=allfuncs): from matplotlib import pylab as pl pl.ioff() nnt = NNTester(npoints=1000) lpt = LinearTester(npoints=1000) for func in allfuncs: print(func.title) nnt.plot(func, interp=False, plotter='imshow') pl.savefig('%s-ref-img.png' % func.__name__) nnt.plot(func, interp=True, plotter='imshow') pl.savefig('%s-nn-img.png' % func.__name__) lpt.plot(func, interp=True, plotter='imshow') pl.savefig('%s-lin-img.png' % func.__name__) nnt.plot(func, interp=False, plotter='contour') pl.savefig('%s-ref-con.png' % func.__name__) nnt.plot(func, interp=True, plotter='contour') pl.savefig('%s-nn-con.png' % func.__name__) lpt.plot(func, interp=True, plotter='contour') pl.savefig('%s-lin-con.png' % func.__name__) pl.ion() def plot_dt(tri, colors=None): import matplotlib as mpl from matplotlib import pylab as pl if colors is None: colors = [(0, 0, 0, 0.2)] lc = 
mpl.collections.LineCollection( np.array([((tri.x[i], tri.y[i]), (tri.x[j], tri.y[j])) for i, j in tri.edge_db]), colors=colors) ax = pl.gca() ax.add_collection(lc) pl.draw_if_interactive() def plot_vo(tri, colors=None): import matplotlib as mpl from matplotlib import pylab as pl if colors is None: colors = [(0, 1, 0, 0.2)] lc = mpl.collections.LineCollection(np.array( [(tri.circumcenters[i], tri.circumcenters[j]) for i in xrange(len(tri.circumcenters)) for j in tri.triangle_neighbors[i] if j != -1]), colors=colors) ax = pl.gca() ax.add_collection(lc) pl.draw_if_interactive() def plot_cc(tri, edgecolor=None): import matplotlib as mpl from matplotlib import pylab as pl if edgecolor is None: edgecolor = (0, 0, 1, 0.2) dxy = (np.array([(tri.x[i], tri.y[i]) for i, j, k in tri.triangle_nodes]) - tri.circumcenters) r = np.hypot(dxy[:, 0], dxy[:, 1]) ax = pl.gca() for i in xrange(len(r)): p = mpl.patches.Circle(tri.circumcenters[i], r[i], resolution=100, edgecolor=edgecolor, facecolor=(1, 1, 1, 0), linewidth=0.2) ax.add_patch(p) pl.draw_if_interactive() def quality(func, mesh, interpolator='nn', n=33): """Compute a quality factor (the quantity r**2 from TOMS792). interpolator must be in ('linear', 'nn'). """ fz = func(mesh.x, mesh.y) tri = Triangulation(mesh.x, mesh.y) intp = getattr(tri, interpolator + '_extrapolator')(fz, bbox=(0., 1., 0., 1.)) Y, X = np.mgrid[0:1:complex(0, n), 0:1:complex(0, n)] Z = func(X, Y) iz = intp[0:1:complex(0, n), 0:1:complex(0, n)] #nans = np.isnan(iz) #numgood = n*n - np.sum(np.array(nans.flat, np.int32)) numgood = n * n SE = (Z - iz) ** 2 SSE = np.sum(SE.flat) meanZ = np.sum(Z.flat) / numgood SM = (Z - meanZ) ** 2 SSM = np.sum(SM.flat) r2 = 1.0 - SSE / SSM print(func.__name__, r2, SSE, SSM, numgood) return r2 def allquality(interpolator='nn', allfuncs=allfuncs, data=data, n=33): results = {} kv = list(six.iteritems(data)) kv.sort() for name, mesh in kv: reslist = results.setdefault(name, []) for func in allfuncs: reslist.append(quality(func, mesh, interpolator, n)) return results def funky(): x0 = np.array([0.25, 0.3, 0.5, 0.6, 0.6]) y0 = np.array([0.2, 0.35, 0.0, 0.25, 0.65]) tx = 0.46 ty = 0.23 t0 = Triangulation(x0, y0) t1 = Triangulation(np.hstack((x0, [tx])), np.hstack((y0, [ty]))) return t0, t1
apache-2.0
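The file above targets matplotlib's long-removed `delaunay` module. As an illustrative stand-in, the sketch below evaluates the same `cosine_peak` surface and interpolates it with the modern `matplotlib.tri` API; the point count and grid size loosely follow the `LinearTester` defaults.

import numpy as np
import matplotlib.tri as mtri

rng = np.random.RandomState(1234567890)
x = rng.uniform(0.0, 1.0, 250)
y = rng.uniform(0.0, 1.0, 250)

# cosine_peak, copied from the test functions above
circle = np.hypot(80 * x - 40.0, 90 * y - 45.0)
z = np.exp(-0.04 * circle) * np.cos(0.15 * circle)

tri = mtri.Triangulation(x, y)
interp = mtri.LinearTriInterpolator(tri, z)
Y, X = np.mgrid[0:1:33j, 0:1:33j]
Z = interp(X, Y)  # masked array; points outside the convex hull are masked
print(int(Z.count()), 'of', Z.size, 'grid points interpolated')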
PanDAWMS/panda-server
pandaserver/taskbuffer/WrappedPickle.py
1
2703
import sys
from io import BytesIO

try:
    # python 2
    import cPickle as pickle
    from copy_reg import _reconstructor as map__reconstructor
except ImportError:
    # python 3
    import pickle
    from copyreg import _reconstructor as map__reconstructor

# define Unpickler
if pickle.__name__ == 'cPickle':
    # python 2
    Common_Unpickler = pickle.Unpickler
else:
    # python 3
    class Common_Unpickler(pickle.Unpickler):
        def __setattr__(self, key, value):
            if key == 'find_global':
                pickle.Unpickler.__setattr__(self, 'find_class', value)
            else:
                pickle.Unpickler.__setattr__(self, key, value)


# wrapper to avoid de-serializing unsafe objects
class WrappedPickle(object):
    # allowed modules and classes
    allowedModClass = {
        'copy_reg': ['_reconstructor'],
        '__builtin__': ['object'],
        'datetime': ['datetime'],
        'taskbuffer.JobSpec': ['JobSpec'],
        'taskbuffer.FileSpec': ['FileSpec'],
        'pandaserver.taskbuffer.JobSpec': ['JobSpec'],
        'pandaserver.taskbuffer.FileSpec': ['FileSpec'],
        }

    # bare modules
    bareMods = {'taskbuffer.': 'pandaserver.'}

    # predefined class map
    predefined_class = {('copy_reg', '_reconstructor'): map__reconstructor,
                        ('__builtin__', 'object'): object}

    # check module and class
    @classmethod
    def find_class(cls, module, name):
        # append prefix to bare modules
        for bareMod in cls.bareMods:
            if module.startswith(bareMod):
                module = cls.bareMods[bareMod] + module
                break
        # check module
        if module not in cls.allowedModClass:
            raise pickle.UnpicklingError(
                'Attempting to import disallowed module %s' % module)
        # return predefined class
        key = (module, name)
        if key in cls.predefined_class:
            return cls.predefined_class[key]
        # import module
        __import__(module)
        mod = sys.modules[module]
        # check class
        if name not in cls.allowedModClass[module]:
            raise pickle.UnpicklingError(
                'Attempting to get disallowed class %s in %s' % (name, module))
        klass = getattr(mod, name)
        return klass

    # loads
    @classmethod
    def loads(cls, pickle_string):
        if isinstance(pickle_string, str):
            pickle_string = pickle_string.encode()
        pickle_obj = Common_Unpickler(BytesIO(pickle_string))
        pickle_obj.find_global = cls.find_class
        return pickle_obj.load()

    # dumps
    @classmethod
    def dumps(cls, obj):
        return pickle.dumps(obj, protocol=0)
apache-2.0
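A hypothetical round-trip with the whitelist-based unpickler above. `datetime` is on the allow list, so the first load should succeed, while a pickle referencing a class that is not whitelisted (here `complex`) is expected to be rejected by `find_class`. The import path assumes the module location shown above.

import datetime
import pickle

from pandaserver.taskbuffer.WrappedPickle import WrappedPickle

now = datetime.datetime.utcnow()
blob = WrappedPickle.dumps(now)      # protocol 0, ASCII-friendly
assert WrappedPickle.loads(blob) == now

try:
    # complex is not whitelisted, so this should raise UnpicklingError
    WrappedPickle.loads(pickle.dumps(complex(1, 2)))
except pickle.UnpicklingError as exc:
    print('rejected as expected:', exc)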
akionakamura/scikit-learn
examples/covariance/plot_outlier_detection.py
235
3891
""" ========================================== Outlier detection with several methods. ========================================== When the amount of contamination is known, this example illustrates two different ways of performing :ref:`outlier_detection`: - based on a robust estimator of covariance, which is assuming that the data are Gaussian distributed and performs better than the One-Class SVM in that case. - using the One-Class SVM and its ability to capture the shape of the data set, hence performing better when the data is strongly non-Gaussian, i.e. with two well-separated clusters; The ground truth about inliers and outliers is given by the points colors while the orange-filled area indicates which points are reported as inliers by each method. Here, we assume that we know the fraction of outliers in the datasets. Thus rather than using the 'predict' method of the objects, we set the threshold on the decision_function to separate out the corresponding fraction. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt import matplotlib.font_manager from scipy import stats from sklearn import svm from sklearn.covariance import EllipticEnvelope # Example settings n_samples = 200 outliers_fraction = 0.25 clusters_separation = [0, 1, 2] # define two outlier detection tools to be compared classifiers = { "One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05, kernel="rbf", gamma=0.1), "robust covariance estimator": EllipticEnvelope(contamination=.1)} # Compare given classifiers under given settings xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500)) n_inliers = int((1. - outliers_fraction) * n_samples) n_outliers = int(outliers_fraction * n_samples) ground_truth = np.ones(n_samples, dtype=int) ground_truth[-n_outliers:] = 0 # Fit the problem with varying cluster separation for i, offset in enumerate(clusters_separation): np.random.seed(42) # Data generation X1 = 0.3 * np.random.randn(0.5 * n_inliers, 2) - offset X2 = 0.3 * np.random.randn(0.5 * n_inliers, 2) + offset X = np.r_[X1, X2] # Add outliers X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))] # Fit the model with the One-Class SVM plt.figure(figsize=(10, 5)) for i, (clf_name, clf) in enumerate(classifiers.items()): # fit the data and tag outliers clf.fit(X) y_pred = clf.decision_function(X).ravel() threshold = stats.scoreatpercentile(y_pred, 100 * outliers_fraction) y_pred = y_pred > threshold n_errors = (y_pred != ground_truth).sum() # plot the levels lines and the points Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) subplot = plt.subplot(1, 2, i + 1) subplot.set_title("Outlier detection") subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7), cmap=plt.cm.Blues_r) a = subplot.contour(xx, yy, Z, levels=[threshold], linewidths=2, colors='red') subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()], colors='orange') b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white') c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black') subplot.axis('tight') subplot.legend( [a.collections[0], b, c], ['learned decision function', 'true inliers', 'true outliers'], prop=matplotlib.font_manager.FontProperties(size=11)) subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors)) subplot.set_xlim((-7, 7)) subplot.set_ylim((-7, 7)) plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26) plt.show()
bsd-3-clause
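The docstring above explains that, because the contamination fraction is assumed known, the threshold is placed on `decision_function` rather than relying on `predict`. The snippet below isolates that thresholding step on a small synthetic set; all numbers are illustrative.

import numpy as np
from scipy import stats
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(42)
X = np.r_[0.3 * rng.randn(100, 2), rng.uniform(-6, 6, size=(25, 2))]
outliers_fraction = 0.2  # assumed known, as in the example above

clf = EllipticEnvelope(contamination=0.1).fit(X)
scores = clf.decision_function(X).ravel()

# cut the scores at the percentile matching the known outlier fraction
threshold = stats.scoreatpercentile(scores, 100 * outliers_fraction)
is_inlier = scores > threshold
print(is_inlier.sum(), 'of', len(X), 'points kept as inliers')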
YzPaul3/h2o-3
h2o-py/tests/testdir_algos/gbm/pyunit_weights_var_impGBM.py
8
6378
from __future__ import print_function
from builtins import zip
from builtins import range
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
import random


def weights_var_imp():

    def check_same(data1, data2, min_rows_scale):
        gbm1_regression = H2OGradientBoostingEstimator(min_rows=5, ntrees=5, max_depth=5)
        gbm1_regression.train(x=["displacement", "power", "weight", "acceleration", "year"],
                              y="economy", training_frame=data1)
        gbm2_regression = H2OGradientBoostingEstimator(min_rows=5*min_rows_scale, ntrees=5, max_depth=5)
        gbm2_regression.train(x=["displacement", "power", "weight", "acceleration", "year", "weights"],
                              y="economy", training_frame=data2, weights_column="weights")

        gbm1_binomial = H2OGradientBoostingEstimator(min_rows=5, distribution="bernoulli",
                                                     ntrees=5, max_depth=5)
        gbm1_binomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
                            y="economy_20mpg", training_frame=data1)
        gbm2_binomial = H2OGradientBoostingEstimator(min_rows=5*min_rows_scale, distribution="bernoulli",
                                                     ntrees=5, max_depth=5)
        gbm2_binomial.train(x=["displacement", "power", "weight", "acceleration", "year", "weights"],
                            y="economy_20mpg", training_frame=data2, weights_column="weights")

        gbm1_multinomial = H2OGradientBoostingEstimator(min_rows=5, distribution="multinomial",
                                                        ntrees=5, max_depth=5)
        gbm1_multinomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
                               y="cylinders", training_frame=data1)
        gbm2_multinomial = H2OGradientBoostingEstimator(min_rows=5*min_rows_scale, distribution="multinomial",
                                                        ntrees=5, max_depth=5)
        gbm2_multinomial.train(x=["displacement", "power", "weight", "acceleration", "year", "weights"],
                               y="cylinders", weights_column="weights", training_frame=data2)

        reg1_vi = gbm1_regression.varimp()
        reg2_vi = gbm2_regression.varimp()
        bin1_vi = gbm1_binomial.varimp()
        bin2_vi = gbm2_binomial.varimp()
        mul1_vi = gbm1_multinomial.varimp()
        mul2_vi = gbm2_multinomial.varimp()

        print("Varimp (regression) no weights vs. weights: {0}, {1}".format(reg1_vi, reg2_vi))
        print("Varimp (binomial) no weights vs. weights: {0}, {1}".format(bin1_vi, bin2_vi))
        print("Varimp (multinomial) no weights vs. weights: {0}, {1}".format(mul1_vi, mul2_vi))

        # compare the unweighted and weighted variable importances pairwise
        for rvi1, rvi2 in zip(reg1_vi, reg2_vi):
            assert rvi1 == rvi2, "Expected vi's (regression) to be the same, but got {0}, and {1}".format(rvi1, rvi2)
        for bvi1, bvi2 in zip(bin1_vi, bin2_vi):
            assert bvi1 == bvi2, "Expected vi's (binomial) to be the same, but got {0}, and {1}".format(bvi1, bvi2)
        for mvi1, mvi2 in zip(mul1_vi, mul2_vi):
            assert mvi1 == mvi2, "Expected vi's (multinomial) to be the same, but got {0}, and {1}".format(mvi1, mvi2)

    h2o_cars_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
    h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()

    # uniform weights same as no weights
    weight = random.randint(1, 10)
    uniform_weights = [[weight] for _ in range(406)]
    h2o_uniform_weights = h2o.H2OFrame(uniform_weights)
    h2o_uniform_weights.set_names(["weights"])
    h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)

    print("\n\nChecking that using uniform weights is equivalent to no weights:")
    check_same(h2o_cars_data, h2o_data_uniform_weights, weight)

    # zero weights same as removed observations
    zero_weights = [[0 if random.randint(0, 1) else 1] for r in range(406)]
    h2o_zero_weights = h2o.H2OFrame(zero_weights)
    h2o_zero_weights.set_names(["weights"])
    h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
    h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]

    print("\n\nChecking that using some zero weights is equivalent to removing those observations:")
    check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)

    # doubled weights same as doubled observations
    doubled_weights = [[1 if random.randint(0, 1) else 2] for r in range(406)]
    h2o_doubled_weights = h2o.H2OFrame(doubled_weights)
    h2o_doubled_weights.set_names(["weights"])
    h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)

    doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
    colnames = doubled_data.pop(0)
    # duplicate every row whose weight is 2 so the plain frame mirrors the weighted one
    for idx, w in enumerate(doubled_weights):
        if w[0] == 2:
            doubled_data.append(doubled_data[idx])
    h2o_data_doubled = h2o.H2OFrame(doubled_data)
    h2o_data_doubled.set_names(list(colnames))

    h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
    h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
    h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
    h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()

    print("\n\nChecking that doubling some weights is equivalent to doubling those observations:")
    check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)


if __name__ == "__main__":
    pyunit_utils.standalone_test(weights_var_imp)
else:
    weights_var_imp()
apache-2.0
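The test above leans on three equivalences: uniform weights behave like no weights, zero weights like dropped rows, and a weight of two like a duplicated row. The sketch below checks the same idea with plain weighted means in NumPy; it is only an illustration and has no H2O dependency.

import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(10)

# uniform weights give the ordinary mean
assert np.isclose(np.average(x, weights=np.full(10, 3.0)), x.mean())

# zero weights behave like removed observations
w_zero = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1], dtype=float)
assert np.isclose(np.average(x, weights=w_zero), x[w_zero == 1].mean())

# a weight of two behaves like a duplicated observation
w_double = np.array([1, 2, 1, 1, 2, 1, 1, 2, 1, 1], dtype=float)
x_doubled = np.concatenate([x, x[w_double == 2]])
assert np.isclose(np.average(x, weights=w_double), x_doubled.mean())

print('weight equivalences hold')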
fcollonval/matplotlib_qtquick_playground
backend/backend_qtquick5/backend_qquick5agg.py
1
34813
import ctypes import os import sys import traceback import matplotlib from matplotlib.backends.backend_agg import FigureCanvasAgg from matplotlib.backend_bases import cursors from matplotlib.figure import Figure from matplotlib.backends.backend_qt5 import TimerQT from matplotlib.externals import six from PyQt5 import QtCore, QtGui, QtQuick, QtWidgets DEBUG = False class MatplotlibIconProvider(QtQuick.QQuickImageProvider): """ This class provide the matplotlib icons for the navigation toolbar. """ def __init__(self, img_type = QtQuick.QQuickImageProvider.Pixmap): self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images') QtQuick.QQuickImageProvider.__init__(self, img_type) def requestImage(self, id, size): img = QtGui.QImage(os.path.join(self.basedir, id + '.png')) size = img.size() return img, size def requestPixmap(self, id, size): img, size = self.requestImage(id, size) pixmap = QtGui.QPixmap.fromImage(img) return pixmap, size class FigureCanvasQtQuickAgg(QtQuick.QQuickPaintedItem, FigureCanvasAgg): """ This class creates a QtQuick Item encapsulating a Matplotlib Figure and all the functions to interact with the 'standard' Matplotlib navigation toolbar. """ # map Qt button codes to MouseEvent's ones: buttond = { QtCore.Qt.LeftButton: 1, QtCore.Qt.MidButton: 2, QtCore.Qt.RightButton: 3, # QtCore.Qt.XButton1: None, # QtCore.Qt.XButton2: None, } cursord = { cursors.MOVE: QtCore.Qt.SizeAllCursor, cursors.HAND: QtCore.Qt.PointingHandCursor, cursors.POINTER: QtCore.Qt.ArrowCursor, cursors.SELECT_REGION: QtCore.Qt.CrossCursor, } messageChanged = QtCore.pyqtSignal(str) leftChanged = QtCore.pyqtSignal() rightChanged = QtCore.pyqtSignal() topChanged = QtCore.pyqtSignal() bottomChanged = QtCore.pyqtSignal() wspaceChanged = QtCore.pyqtSignal() hspaceChanged = QtCore.pyqtSignal() def __init__(self, figure, parent=None, coordinates=True): if DEBUG: print('FigureCanvasQtQuickAgg qtquick5: ', figure) # _create_qApp() if figure is None: figure = Figure((6.0, 4.0)) QtQuick.QQuickPaintedItem.__init__(self, parent=parent) FigureCanvasAgg.__init__(self, figure=figure) self._drawRect = None self.blitbox = None # Activate hover events and mouse press events self.setAcceptHoverEvents(True) self.setAcceptedMouseButtons(QtCore.Qt.AllButtons) self._agg_draw_pending = False def getFigure(self): return self.figure def drawRectangle(self, rect): self._drawRect = rect self.update() def paint(self, p): """ Copy the image from the Agg canvas to the qt.drawable. In Qt, all drawing should be done inside of here when a widget is shown onscreen. """ # if the canvas does not have a renderer, then give up and wait for # FigureCanvasAgg.draw(self) to be called if not hasattr(self, 'renderer'): return if DEBUG: print('FigureCanvasQtQuickAgg.paint: ', self, self.get_width_height()) if self.blitbox is None: # matplotlib is in rgba byte order. QImage wants to put the bytes # into argb format and is in a 4 byte unsigned int. Little endian # system is LSB first and expects the bytes in reverse order # (bgra). 
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian: stringBuffer = self.renderer._renderer.tostring_bgra() else: stringBuffer = self.renderer._renderer.tostring_argb() refcnt = sys.getrefcount(stringBuffer) # convert the Agg rendered image -> qImage qImage = QtGui.QImage(stringBuffer, self.renderer.width, self.renderer.height, QtGui.QImage.Format_ARGB32) # get the rectangle for the image rect = qImage.rect() # p = QtGui.QPainter(self) # reset the image area of the canvas to be the back-ground color p.eraseRect(rect) # draw the rendered image on to the canvas p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage)) # draw the zoom rectangle to the QPainter if self._drawRect is not None: p.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DotLine)) x, y, w, h = self._drawRect p.drawRect(x, y, w, h) else: bbox = self.blitbox l, b, r, t = bbox.extents w = int(r) - int(l) h = int(t) - int(b) t = int(b) + h reg = self.copy_from_bbox(bbox) stringBuffer = reg.to_string_argb() qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32) pixmap = QtGui.QPixmap.fromImage(qImage) p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap) # draw the zoom rectangle to the QPainter if self._drawRect is not None: p.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DotLine)) x, y, w, h = self._drawRect p.drawRect(x, y, w, h) self.blitbox = None def draw(self): """ Draw the figure with Agg, and queue a request for a Qt draw. """ # The Agg draw is done here; delaying causes problems with code that # uses the result of the draw() to update plot elements. FigureCanvasAgg.draw(self) self.update() def draw_idle(self): """ Queue redraw of the Agg buffer and request Qt paintEvent. """ # The Agg draw needs to be handled by the same thread matplotlib # modifies the scene graph from. Post Agg draw request to the # current event loop in order to ensure thread affinity and to # accumulate multiple draw requests from event handling. # TODO: queued signal connection might be safer than singleShot if not self._agg_draw_pending: self._agg_draw_pending = True QtCore.QTimer.singleShot(0, self.__draw_idle_agg) def __draw_idle_agg(self, *args): if self.height() < 0 or self.width() < 0: self._agg_draw_pending = False return try: FigureCanvasAgg.draw(self) self.update() except Exception: # Uncaught exceptions are fatal for PyQt5, so catch them instead. traceback.print_exc() finally: self._agg_draw_pending = False def blit(self, bbox=None): """ Blit the region in bbox """ # If bbox is None, blit the entire canvas. Otherwise # blit only the area defined by the bbox. 
if bbox is None and self.figure: bbox = self.figure.bbox self.blitbox = bbox l, b, w, h = bbox.bounds t = b + h self.repaint(l, self.renderer.height-t, w, h) def geometryChanged(self, new_geometry, old_geometry): w = new_geometry.width() h = new_geometry.height() if (w <= 0.0) and (h <= 0.0): return if DEBUG: print('resize (%d x %d)' % (w, h)) print("FigureCanvasQtQuickAgg.geometryChanged(%d, %d)" % (w, h)) dpival = self.figure.dpi winch = w / dpival hinch = h / dpival self.figure.set_size_inches(winch, hinch) FigureCanvasAgg.resize_event(self) self.draw_idle() QtQuick.QQuickPaintedItem.geometryChanged(self, new_geometry, old_geometry) def hoverEnterEvent(self, event): FigureCanvasAgg.enter_notify_event(self, guiEvent=event) def hoverLeaveEvent(self, event): QtWidgets.QApplication.restoreOverrideCursor() FigureCanvasAgg.leave_notify_event(self, guiEvent=event) def hoverMoveEvent(self, event): x = event.pos().x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.pos().y() FigureCanvasAgg.motion_notify_event(self, x, y, guiEvent=event) # if DEBUG: # print('hover move') # hoverMoveEvent kicks in when no mouse buttons are pressed # otherwise mouseMoveEvent are emitted def mouseMoveEvent(self, event): x = event.x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.y() FigureCanvasAgg.motion_notify_event(self, x, y, guiEvent=event) # if DEBUG: # print('mouse move') def mousePressEvent(self, event): x = event.pos().x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.pos().y() button = self.buttond.get(event.button()) if button is not None: FigureCanvasAgg.button_press_event(self, x, y, button, guiEvent=event) if DEBUG: print('button pressed:', event.button()) def mouseReleaseEvent(self, event): x = event.x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.y() button = self.buttond.get(event.button()) if button is not None: FigureCanvasAgg.button_release_event(self, x, y, button, guiEvent=event) if DEBUG: print('button released') def mouseDoubleClickEvent(self, event): x = event.pos().x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.pos().y() button = self.buttond.get(event.button()) if button is not None: FigureCanvasAgg.button_press_event(self, x, y, button, dblclick=True, guiEvent=event) if DEBUG: print('button doubleclicked:', event.button()) def wheelEvent(self, event): x = event.x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.y() # from QWheelEvent::delta doc if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0: steps = event.angleDelta().y() / 120 else: steps = event.pixelDelta().y() if steps != 0: FigureCanvasAgg.scroll_event(self, x, y, steps, guiEvent=event) if DEBUG: print('scroll event: ' 'steps = %i ' % (steps)) def keyPressEvent(self, event): key = self._get_key(event) if key is None: return FigureCanvasAgg.key_press_event(self, key, guiEvent=event) if DEBUG: print('key press', key) def keyReleaseEvent(self, event): key = self._get_key(event) if key is None: return FigureCanvasAgg.key_release_event(self, key, guiEvent=event) if DEBUG: print('key release', key) def _get_key(self, event): if event.isAutoRepeat(): return None event_key = event.key() event_mods = int(event.modifiers()) # actually a bitmask # get names of the pressed modifier keys # bit twiddling to pick out modifier keys from event_mods bitmask, # if event_key is a MODIFIER, it should not be duplicated in mods mods = [name for name, mod_key, qt_key in MODIFIER_KEYS if 
event_key != qt_key and (event_mods & mod_key) == mod_key] try: # for certain keys (enter, left, backspace, etc) use a word for the # key, rather than unicode key = SPECIAL_KEYS[event_key] except KeyError: # unicode defines code points up to 0x0010ffff # QT will use Key_Codes larger than that for keyboard keys that are # are not unicode characters (like multimedia keys) # skip these # if you really want them, you should add them to SPECIAL_KEYS MAX_UNICODE = 0x10ffff if event_key > MAX_UNICODE: return None key = six.unichr(event_key) # qt delivers capitalized letters. fix capitalization # note that capslock is ignored if 'shift' in mods: mods.remove('shift') else: key = key.lower() mods.reverse() return '+'.join(mods + [key]) def new_timer(self, *args, **kwargs): """ Creates a new backend-specific subclass of :class:`backend_bases.Timer`. This is useful for getting periodic events through the backend's native event loop. Implemented only for backends with GUIs. optional arguments: *interval* Timer interval in milliseconds *callbacks* Sequence of (func, args, kwargs) where func(*args, **kwargs) will be executed by the timer every *interval*. """ return TimerQT(*args, **kwargs) def flush_events(self): global qApp qApp.processEvents() def start_event_loop(self, timeout): FigureCanvasAgg.start_event_loop_default(self, timeout) start_event_loop.__doc__ = \ FigureCanvasAgg.start_event_loop_default.__doc__ def stop_event_loop(self): FigureCanvasAgg.stop_event_loop_default(self) stop_event_loop.__doc__ = FigureCanvasAgg.stop_event_loop_default.__doc__ class FigureQtQuickAggToolbar(FigureCanvasQtQuickAgg): """ This class creates a QtQuick Item encapsulating a Matplotlib Figure and all the functions to interact with the 'standard' Matplotlib navigation toolbar. 
""" cursord = { cursors.MOVE: QtCore.Qt.SizeAllCursor, cursors.HAND: QtCore.Qt.PointingHandCursor, cursors.POINTER: QtCore.Qt.ArrowCursor, cursors.SELECT_REGION: QtCore.Qt.CrossCursor, } messageChanged = QtCore.pyqtSignal(str) leftChanged = QtCore.pyqtSignal() rightChanged = QtCore.pyqtSignal() topChanged = QtCore.pyqtSignal() bottomChanged = QtCore.pyqtSignal() wspaceChanged = QtCore.pyqtSignal() hspaceChanged = QtCore.pyqtSignal() def __init__(self, figure, parent=None, coordinates=True): if DEBUG: print('FigureQtQuickAggToolbar qtquick5: ', figure) FigureCanvasQtQuickAgg.__init__(self, figure=figure, parent=parent) self._message = "" # # Attributes from NavigationToolbar2QT # self.coordinates = coordinates self._actions = {} # reference holder for subplots_adjust window self.adj_window = None # # Attributes from NavigationToolbar2 # self.canvas = self.figure.canvas self.toolbar = self # a dict from axes index to a list of view limits self._views = matplotlib.cbook.Stack() self._positions = matplotlib.cbook.Stack() # stack of subplot positions self._xypress = None # the location and axis info at the time # of the press self._idPress = None self._idRelease = None self._active = None self._lastCursor = None self._idDrag = self.canvas.mpl_connect( 'motion_notify_event', self.mouse_move) self._ids_zoom = [] self._zoom_mode = None self._button_pressed = None # determined by the button pressed # at start self.mode = '' # a mode string for the status bar self.set_history_buttons() # # Store margin # self._defaults = {} for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ): val = getattr(self.figure.subplotpars, attr) self._defaults[attr] = val setattr(self, attr, val) @QtCore.pyqtProperty('QString', notify=messageChanged) def message(self): return self._message @message.setter def message(self, msg): if msg != self._message: self._message = msg self.messageChanged.emit(msg) @QtCore.pyqtProperty('QString', constant=True) def defaultDirectory(self): startpath = matplotlib.rcParams.get('savefig.directory', '') return os.path.expanduser(startpath) @QtCore.pyqtProperty('QStringList', constant=True) def fileFilters(self): filetypes = self.canvas.get_supported_filetypes_grouped() sorted_filetypes = list(six.iteritems(filetypes)) sorted_filetypes.sort() filters = [] for name, exts in sorted_filetypes: exts_list = " ".join(['*.%s' % ext for ext in exts]) filter = '%s (%s)' % (name, exts_list) filters.append(filter) return filters @QtCore.pyqtProperty('QString', constant=True) def defaultFileFilter(self): default_filetype = self.canvas.get_default_filetype() selectedFilter = None for filter in self.fileFilters: exts = filter.split('(', maxsplit=1)[1] exts = exts[:-1].split() if default_filetype in exts: selectedFilter = filter break if selectedFilter is None: selectedFilter = self.fileFilters[0] return selectedFilter @QtCore.pyqtProperty(float, notify=leftChanged) def left(self): return self.figure.subplotpars.left @left.setter def left(self, value): if value != self.figure.subplotpars.left: self.figure.subplots_adjust(left=value) self.leftChanged.emit() self.figure.canvas.draw_idle() @QtCore.pyqtProperty(float, notify=rightChanged) def right(self): return self.figure.subplotpars.right @right.setter def right(self, value): if value != self.figure.subplotpars.right: self.figure.subplots_adjust(right=value) self.rightChanged.emit() self.figure.canvas.draw_idle() @QtCore.pyqtProperty(float, notify=topChanged) def top(self): return self.figure.subplotpars.top @top.setter def top(self, value): 
if value != self.figure.subplotpars.top: self.figure.subplots_adjust(top=value) self.topChanged.emit() self.figure.canvas.draw_idle() @QtCore.pyqtProperty(float, notify=bottomChanged) def bottom(self): return self.figure.subplotpars.bottom @bottom.setter def bottom(self, value): if value != self.figure.subplotpars.bottom: self.figure.subplots_adjust(bottom=value) self.bottomChanged.emit() self.figure.canvas.draw_idle() @QtCore.pyqtProperty(float, notify=hspaceChanged) def hspace(self): return self.figure.subplotpars.hspace @hspace.setter def hspace(self, value): if value != self.figure.subplotpars.hspace: self.figure.subplots_adjust(hspace=value) self.hspaceChanged.emit() self.figure.canvas.draw_idle() @QtCore.pyqtProperty(float, notify=wspaceChanged) def wspace(self): return self.figure.subplotpars.wspace @wspace.setter def wspace(self, value): if value != self.figure.subplotpars.wspace: self.figure.subplots_adjust(wspace=value) self.wspaceChanged.emit() self.figure.canvas.draw_idle() def mouse_move(self, event): self._set_cursor(event) if event.inaxes and event.inaxes.get_navigate(): try: s = event.inaxes.format_coord(event.xdata, event.ydata) except (ValueError, OverflowError): pass else: artists = [a for a in event.inaxes.mouseover_set if a.contains(event)] if artists: a = max(enumerate(artists), key=lambda x: x[1].zorder)[1] if a is not event.inaxes.patch: data = a.get_cursor_data(event) if data is not None: s += ' [{:s}]'.format(a.format_cursor_data(data)) if len(self.mode): self.message = '{:s}, {:s}'.format(self.mode, s) else: self.message = s else: self.message = self.mode def dynamic_update(self): self.canvas.draw_idle() def push_current(self): """push the current view limits and position onto the stack""" views = [] pos = [] for a in self.canvas.figure.get_axes(): views.append(a._get_view()) # Store both the original and modified positions pos.append(( a.get_position(True).frozen(), a.get_position().frozen())) self._views.push(views) self._positions.push(pos) self.set_history_buttons() def set_history_buttons(self): """Enable or disable back/forward button""" pass def _update_view(self): """Update the viewlim and position from the view and position stack for each axes """ views = self._views() if views is None: return pos = self._positions() if pos is None: return for i, a in enumerate(self.canvas.figure.get_axes()): a._set_view(views[i]) # Restore both the original and modified positions a.set_position(pos[i][0], 'original') a.set_position(pos[i][1], 'active') self.canvas.draw_idle() @QtCore.pyqtSlot() def home(self, *args): """Restore the original view""" self._views.home() self._positions.home() self.set_history_buttons() self._update_view() @QtCore.pyqtSlot() def forward(self, *args): """Move forward in the view lim stack""" self._views.forward() self._positions.forward() self.set_history_buttons() self._update_view() @QtCore.pyqtSlot() def back(self, *args): """move back up the view lim stack""" self._views.back() self._positions.back() self.set_history_buttons() self._update_view() def _set_cursor(self, event): if not event.inaxes or not self._active: if self._lastCursor != cursors.POINTER: self.set_cursor(cursors.POINTER) self._lastCursor = cursors.POINTER else: if self._active == 'ZOOM': if self._lastCursor != cursors.SELECT_REGION: self.set_cursor(cursors.SELECT_REGION) self._lastCursor = cursors.SELECT_REGION elif (self._active == 'PAN' and self._lastCursor != cursors.MOVE): self.set_cursor(cursors.MOVE) self._lastCursor = cursors.MOVE def set_cursor(self, cursor): """ 
Set the current cursor to one of the :class:`Cursors` enums values """ if DEBUG: print('Set cursor', cursor) self.canvas.setCursor(self.cursord[cursor]) def draw_with_locators_update(self): """Redraw the canvases, update the locators""" for a in self.canvas.figure.get_axes(): xaxis = getattr(a, 'xaxis', None) yaxis = getattr(a, 'yaxis', None) locators = [] if xaxis is not None: locators.append(xaxis.get_major_locator()) locators.append(xaxis.get_minor_locator()) if yaxis is not None: locators.append(yaxis.get_major_locator()) locators.append(yaxis.get_minor_locator()) for loc in locators: loc.refresh() self.canvas.draw_idle() def press(self, event): """Called whenever a mouse button is pressed.""" pass def press_pan(self, event): """the press mouse button in pan/zoom mode callback""" if event.button == 1: self._button_pressed = 1 elif event.button == 3: self._button_pressed = 3 else: self._button_pressed = None return x, y = event.x, event.y # push the current view to define home if stack is empty if self._views.empty(): self.push_current() self._xypress = [] for i, a in enumerate(self.canvas.figure.get_axes()): if (x is not None and y is not None and a.in_axes(event) and a.get_navigate() and a.can_pan()): a.start_pan(x, y, event.button) self._xypress.append((a, i)) self.canvas.mpl_disconnect(self._idDrag) self._idDrag = self.canvas.mpl_connect('motion_notify_event', self.drag_pan) self.press(event) def release(self, event): """this will be called whenever mouse button is released""" pass def release_pan(self, event): """the release mouse button callback in pan/zoom mode""" if self._button_pressed is None: return self.canvas.mpl_disconnect(self._idDrag) self._idDrag = self.canvas.mpl_connect( 'motion_notify_event', self.mouse_move) for a, ind in self._xypress: a.end_pan() if not self._xypress: return self._xypress = [] self._button_pressed = None self.push_current() self.release(event) self.draw_with_locators_update() def drag_pan(self, event): """the drag callback in pan/zoom mode""" for a, ind in self._xypress: #safer to use the recorded button at the press than current button: #multiple button can get pressed during motion... a.drag_pan(self._button_pressed, event.key, event.x, event.y) self.dynamic_update() @QtCore.pyqtSlot() def pan(self, *args): """Activate the pan/zoom tool. 
pan with left button, zoom with right""" # set the pointer icon and button press funcs to the # appropriate callbacks if self._active == 'PAN': self._active = None else: self._active = 'PAN' if self._idPress is not None: self._idPress = self.canvas.mpl_disconnect(self._idPress) self.mode = '' if self._idRelease is not None: self._idRelease = self.canvas.mpl_disconnect(self._idRelease) self.mode = '' if self._active: self._idPress = self.canvas.mpl_connect( 'button_press_event', self.press_pan) self._idRelease = self.canvas.mpl_connect( 'button_release_event', self.release_pan) self.mode = 'pan/zoom' self.canvas.widgetlock(self) else: self.canvas.widgetlock.release(self) for a in self.canvas.figure.get_axes(): a.set_navigate_mode(self._active) self.message = self.mode def draw_rubberband(self, event, x0, y0, x1, y1): """Draw a rectangle rubberband to indicate zoom limits""" height = self.canvas.figure.bbox.height y1 = height - y1 y0 = height - y0 w = abs(x1 - x0) h = abs(y1 - y0) rect = [int(val)for val in (min(x0, x1), min(y0, y1), w, h)] self.canvas.drawRectangle(rect) def remove_rubberband(self): """Remove the rubberband""" self.canvas.drawRectangle(None) def _switch_on_zoom_mode(self, event): self._zoom_mode = event.key self.mouse_move(event) def _switch_off_zoom_mode(self, event): self._zoom_mode = None self.mouse_move(event) def drag_zoom(self, event): """the drag callback in zoom mode""" if self._xypress: x, y = event.x, event.y lastx, lasty, a, ind, view = self._xypress[0] # adjust x, last, y, last x1, y1, x2, y2 = a.bbox.extents x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2) y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2) if self._zoom_mode == "x": x1, y1, x2, y2 = a.bbox.extents y, lasty = y1, y2 elif self._zoom_mode == "y": x1, y1, x2, y2 = a.bbox.extents x, lastx = x1, x2 self.draw_rubberband(event, x, y, lastx, lasty) def press_zoom(self, event): """the press mouse button in zoom to rect mode callback""" # If we're already in the middle of a zoom, pressing another # button works to "cancel" if self._ids_zoom != []: for zoom_id in self._ids_zoom: self.canvas.mpl_disconnect(zoom_id) self.release(event) self.draw_with_locators_update() self._xypress = None self._button_pressed = None self._ids_zoom = [] return if event.button == 1: self._button_pressed = 1 elif event.button == 3: self._button_pressed = 3 else: self._button_pressed = None return x, y = event.x, event.y # push the current view to define home if stack is empty if self._views.empty(): self.push_current() self._xypress = [] for i, a in enumerate(self.canvas.figure.get_axes()): if (x is not None and y is not None and a.in_axes(event) and a.get_navigate() and a.can_zoom()): self._xypress.append((x, y, a, i, a._get_view())) id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom) id2 = self.canvas.mpl_connect('key_press_event', self._switch_on_zoom_mode) id3 = self.canvas.mpl_connect('key_release_event', self._switch_off_zoom_mode) self._ids_zoom = id1, id2, id3 self._zoom_mode = event.key self.press(event) def release_zoom(self, event): """the release mouse button callback in zoom to rect mode""" for zoom_id in self._ids_zoom: self.canvas.mpl_disconnect(zoom_id) self._ids_zoom = [] self.remove_rubberband() if not self._xypress: return last_a = [] for cur_xypress in self._xypress: x, y = event.x, event.y lastx, lasty, a, ind, view = cur_xypress # ignore singular clicks - 5 pixels is a threshold # allows the user to "cancel" a zoom action # by zooming by less than 5 pixels if ((abs(x - 
lastx) < 5 and self._zoom_mode!="y") or (abs(y - lasty) < 5 and self._zoom_mode!="x")): self._xypress = None self.release(event) self.draw_with_locators_update() return # detect twinx,y axes and avoid double zooming twinx, twiny = False, False if last_a: for la in last_a: if a.get_shared_x_axes().joined(a, la): twinx = True if a.get_shared_y_axes().joined(a, la): twiny = True last_a.append(a) if self._button_pressed == 1: direction = 'in' elif self._button_pressed == 3: direction = 'out' else: continue a._set_view_from_bbox((lastx, lasty, x, y), direction, self._zoom_mode, twinx, twiny) self.draw_with_locators_update() self._xypress = None self._button_pressed = None self._zoom_mode = None self.push_current() self.release(event) @QtCore.pyqtSlot() def zoom(self, *args): """Activate zoom to rect mode""" if self._active == 'ZOOM': self._active = None else: self._active = 'ZOOM' if self._idPress is not None: self._idPress = self.canvas.mpl_disconnect(self._idPress) self.mode = '' if self._idRelease is not None: self._idRelease = self.canvas.mpl_disconnect(self._idRelease) self.mode = '' if self._active: self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom) self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom) self.mode = 'zoom rect' self.canvas.widgetlock(self) else: self.canvas.widgetlock.release(self) for a in self.canvas.figure.get_axes(): a.set_navigate_mode(self._active) self.message = self.mode @QtCore.pyqtSlot() def tight_layout(self): self.figure.tight_layout() # self._setSliderPositions() self.draw_idle() @QtCore.pyqtSlot() def reset_margin(self): self.figure.subplots_adjust(**self._defaults) # self._setSliderPositions() self.draw_idle() @QtCore.pyqtSlot(str) def print_figure(self, fname, *args, **kwargs): if fname: fname = QtCore.QUrl(fname).toLocalFile() # save dir for next time savefig_dir = os.path.dirname(six.text_type(fname)) matplotlib.rcParams['savefig.directory'] = savefig_dir fname = six.text_type(fname) FigureCanvasAgg.print_figure(self, fname, *args, **kwargs) self.draw() FigureCanvasQTAgg = FigureCanvasQtQuickAgg FigureCanvasQTAggToolbar = FigureQtQuickAggToolbar
mit
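Illustrative sketch (editor's addition, not part of the record above): the toolbar code relies on a view-history stack with a movable cursor for push_current()/back()/forward()/home(). The class below is a hypothetical, minimal re-implementation of that behaviour in plain Python, shown only to clarify the semantics; it is not the backend's actual stack class.

# Minimal view-history stack, mirroring how the toolbar's _views/_positions
# stacks behave. All names here are hypothetical.
class ViewStack:
    def __init__(self):
        self._elements = []
        self._pos = -1

    def push(self, view):
        # Pushing a new view discards anything "forward" of the cursor.
        del self._elements[self._pos + 1:]
        self._elements.append(view)
        self._pos = len(self._elements) - 1

    def __call__(self):
        # Return the view at the cursor, or None if the stack is empty.
        return self._elements[self._pos] if self._elements else None

    def empty(self):
        return not self._elements

    def home(self):
        if self._elements:
            self._pos = 0

    def back(self):
        self._pos = max(self._pos - 1, 0)

    def forward(self):
        self._pos = min(self._pos + 1, len(self._elements) - 1)


if __name__ == '__main__':
    s = ViewStack()
    for xlim in [(0, 1), (0.2, 0.8), (0.4, 0.6)]:  # successive zooms
        s.push(xlim)
    s.back()   # -> (0.2, 0.8)
    s.home()   # -> (0, 1)
    print(s())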
Abhiquant/trading-with-python
lib/functions.py
76
11627
# -*- coding: utf-8 -*- """ twp support functions @author: Jev Kuznetsov Licence: GPL v2 """ from scipy import polyfit, polyval import datetime as dt #from datetime import datetime, date from pandas import DataFrame, Index, Series import csv import matplotlib.pyplot as plt import numpy as np import pandas as pd def nans(shape, dtype=float): ''' create a nan numpy array ''' a = np.empty(shape, dtype) a.fill(np.nan) return a def plotCorrelationMatrix(price, thresh = None): ''' plot a correlation matrix as a heatmap image inputs: price: prices DataFrame thresh: correlation threshold to use for checking, default None ''' symbols = price.columns.tolist() R = price.pct_change() correlationMatrix = R.corr() if thresh is not None: correlationMatrix = correlationMatrix > thresh plt.imshow(abs(correlationMatrix.values),interpolation='none') plt.xticks(range(len(symbols)),symbols) plt.yticks(range(len(symbols)),symbols) plt.colorbar() plt.title('Correlation matrix') return correlationMatrix def pca(A): """ performs principal components analysis (PCA) on the n-by-p DataFrame A Rows of A correspond to observations, columns to variables. Returns : coeff : principal components, column-wise transform: A in principal component space latent : eigenvalues """ # computing eigenvalues and eigenvectors of covariance matrix M = (A - A.mean()).T # subtract the mean (along columns) [latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted idx = np.argsort(latent) # sort eigenvalues idx = idx[::-1] # in ascending order coeff = coeff[:,idx] latent = latent[idx] score = np.dot(coeff.T,A.T) # projection of the data in the new space transform = DataFrame(index = A.index, data = score.T) return coeff,transform,latent def pos2pnl(price,position , ibTransactionCost=False ): """ calculate pnl based on price and position Inputs: --------- price: series or dataframe of price position: number of shares at each time. Column names must be same as in price ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share Returns a portfolio DataFrame """ delta=position.diff() port = DataFrame(index=price.index) if isinstance(price,Series): # no need to sum along 1 for series port['cash'] = (-delta*price).cumsum() port['stock'] = (position*price) else: # dealing with DataFrame here port['cash'] = (-delta*price).sum(axis=1).cumsum() port['stock'] = (position*price).sum(axis=1) if ibTransactionCost: tc = -0.005*position.diff().abs() # basic transaction cost tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$ if isinstance(price,DataFrame): tc = tc.sum(axis=1) port['tc'] = tc.cumsum() else: port['tc'] = 0. port['total'] = port['stock']+port['cash']+port['tc'] return port def tradeBracket(price,entryBar,maxTradeLength,bracket): ''' trade a symmetrical bracket on price series, return price delta and exit bar # Input ------ price : series of price values entryBar: entry bar number maxTradeLength : max trade duration in bars bracket : allowed price deviation ''' lastBar = min(entryBar+maxTradeLength,len(price)-1) p = price[entryBar:lastBar]-price[entryBar] idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket if idxOutOfBound[0].any(): # found match priceDelta = p[idxOutOfBound[0][0]] exitBar = idxOutOfBound[0][0]+entryBar else: # all in bracket, exiting based on time priceDelta = p[-1] exitBar = lastBar return priceDelta, exitBar def estimateBeta(priceY,priceX,algo = 'standard'): ''' estimate stock Y vs stock X beta using iterative linear regression. 
Outliers outside 3 sigma boundary are filtered out Parameters -------- priceX : price series of x (usually market) priceY : price series of y (estimate beta of this price) Returns -------- beta : stockY beta relative to stock X ''' X = DataFrame({'x':priceX,'y':priceY}) if algo=='returns': ret = (X/X.shift(1)-1).dropna().values #print len(ret) x = ret[:,0] y = ret[:,1] # filter high values low = np.percentile(x,20) high = np.percentile(x,80) iValid = (x>low) & (x<high) x = x[iValid] y = y[iValid] iteration = 1 nrOutliers = 1 while iteration < 10 and nrOutliers > 0 : (a,b) = polyfit(x,y,1) yf = polyval([a,b],x) #plot(x,y,'x',x,yf,'r-') err = yf-y idxOutlier = abs(err) > 3*np.std(err) nrOutliers =sum(idxOutlier) beta = a #print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers) x = x[~idxOutlier] y = y[~idxOutlier] iteration += 1 elif algo=='log': x = np.log(X['x']) y = np.log(X['y']) (a,b) = polyfit(x,y,1) beta = a elif algo=='standard': ret =np.log(X).diff().dropna() beta = ret['x'].cov(ret['y'])/ret['x'].var() else: raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'") return beta def estimateVolatility(ohlc, N=10, algo='YangZhang'): """ Volatility estimation Possible algorithms: ['YangZhang', 'CC'] """ cc = np.log(ohlc.close/ohlc.close.shift(1)) if algo == 'YangZhang': # Yang-zhang volatility ho = np.log(ohlc.high/ohlc.open) lo = np.log(ohlc.low/ohlc.open) co = np.log(ohlc.close/ohlc.open) oc = np.log(ohlc.open/ohlc.close.shift(1)) oc_sq = oc**2 cc_sq = cc**2 rs = ho*(ho-co)+lo*(lo-co) close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0)) open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0)) window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0)) result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252) result[:N-1] = np.nan elif algo == 'CC': # standard close-close estimator result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N)) else: raise ValueError('Unknown algo type.') return result*100 def rank(current,past): ''' calculate a relative rank 0..1 for a value against series ''' return (current>past).sum()/float(past.count()) def returns(df): return (df/df.shift(1)-1) def logReturns(df): t = np.log(df) return t-t.shift(1) def dateTimeToDate(idx): ''' convert datetime index to date ''' dates = [] for dtm in idx: dates.append(dtm.date()) return dates def readBiggerScreener(fName): ''' import data from Bigger Capital screener ''' with open(fName,'rb') as f: reader = csv.reader(f) rows = [row for row in reader] header = rows[0] data = [[] for i in range(len(header))] for row in rows[1:]: for i,elm in enumerate(row): try: data[i].append(float(elm)) except Exception: data[i].append(str(elm)) return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header] def sharpe(pnl): return np.sqrt(250)*pnl.mean()/pnl.std() def drawdown(s): """ calculate max drawdown and duration Input: s, price or cumulative pnl curve $ Returns: drawdown : vector of drawdwon values duration : vector of drawdown duration """ # convert to array if got pandas series, 10x speedup if isinstance(s,pd.Series): idx = s.index s = s.values returnSeries = True else: returnSeries = False if s.min() < 0: # offset if signal minimum is less than zero s = s-s.min() highwatermark = np.zeros(len(s)) drawdown = np.zeros(len(s)) drawdowndur = np.zeros(len(s)) for t in range(1,len(s)): highwatermark[t] = max(highwatermark[t-1], s[t]) drawdown[t] = (highwatermark[t]-s[t]) drawdowndur[t]= (0 if drawdown[t] == 0 else 
drawdowndur[t-1]+1) if returnSeries: return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur) else: return drawdown , drawdowndur def profitRatio(pnl): ''' calculate profit ratio as sum(pnl)/drawdown Input: pnl - daily pnl, Series or DataFrame ''' def processVector(pnl): # process a single column s = pnl.fillna(0) dd = drawdown(s)[0] p = s.sum()/dd.max() return p if isinstance(pnl,Series): return processVector(pnl) elif isinstance(pnl,DataFrame): p = Series(index = pnl.columns) for col in pnl.columns: p[col] = processVector(pnl[col]) return p else: raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl))) def candlestick(df,width=0.5, colorup='b', colordown='r'): ''' plot a candlestick chart of a dataframe ''' O = df['open'].values H = df['high'].values L = df['low'].values C = df['close'].values fig = plt.gcf() ax = plt.axes() #ax.hold(True) X = df.index #plot high and low ax.bar(X,height=H-L,bottom=L,width=0.1,color='k') idxUp = C>O ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup) idxDown = C<=O ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown) try: fig.autofmt_xdate() except Exception: # pragma: no cover pass ax.grid(True) #ax.bar(x,height=H-L,bottom=L,width=0.01,color='k') def datetime2matlab(t): ''' convert datetime timestamp to matlab numeric timestamp ''' mdn = t + dt.timedelta(days = 366) frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0) return mdn.toordinal() + frac def getDataSources(fName = None): ''' return data sources directories for this machine. directories are defined in datasources.ini or provided filepath''' import socket from ConfigParser import ConfigParser pcName = socket.gethostname() p = ConfigParser() p.optionxform = str if fName is None: fName = 'datasources.ini' p.read(fName) if pcName not in p.sections(): raise NameError('Host name section %s not found in file %s' %(pcName,fName)) dataSources = {} for option in p.options(pcName): dataSources[option] = p.get(pcName,option) return dataSources if __name__ == '__main__': df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]}) plt.clf() candlestick(df)
bsd-3-clause
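Illustrative usage sketch (editor's addition) for the lib/functions.py record above: drawdown() accepts a price or cumulative-pnl series and returns drawdown depth and duration, and sharpe() annualizes with sqrt(250). The import path 'lib.functions' assumes the repository root is on sys.path; the data is synthetic.

# Usage sketch for drawdown() and sharpe() from the record above.
import numpy as np
import pandas as pd
from lib.functions import drawdown, sharpe  # assumed import path

# synthetic daily pnl series
rng = pd.date_range('2014-01-01', periods=250, freq='B')
pnl = pd.Series(np.random.normal(0.05, 1.0, len(rng)), index=rng)

# drawdown() expects a price or cumulative pnl curve
dd, dd_dur = drawdown(pnl.cumsum())
print('max drawdown: %.2f (longest %i days)' % (dd.max(), int(dd_dur.max())))
print('annualized sharpe: %.2f' % sharpe(pnl))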
TEAM-Gummy/platform_external_chromium_org
ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py
154
8545
#!/usr/bin/python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import subprocess import sys import tempfile import time script_dir = os.path.dirname(__file__) sys.path.append(os.path.join(script_dir, '../../tools/browser_tester')) import browser_tester import browsertester.browserlauncher # This script extends browser_tester to check for the presence of # Breakpad crash dumps. # This reads a file of lines containing 'key:value' pairs. # The file contains entries like the following: # plat:Win32 # prod:Chromium # ptype:nacl-loader # rept:crash svc def ReadDumpTxtFile(filename): dump_info = {} fh = open(filename, 'r') for line in fh: if ':' in line: key, value = line.rstrip().split(':', 1) dump_info[key] = value fh.close() return dump_info def StartCrashService(browser_path, dumps_dir, windows_pipe_name, cleanup_funcs, crash_service_exe, skip_if_missing=False): # Find crash_service.exe relative to chrome.exe. This is a bit icky. browser_dir = os.path.dirname(browser_path) crash_service_path = os.path.join(browser_dir, crash_service_exe) if skip_if_missing and not os.path.exists(crash_service_path): return proc = subprocess.Popen([crash_service_path, '--v=1', # Verbose output for debugging failures '--dumps-dir=%s' % dumps_dir, '--pipe-name=%s' % windows_pipe_name]) def Cleanup(): # Note that if the process has already exited, this will raise # an 'Access is denied' WindowsError exception, but # crash_service.exe is not supposed to do this and such # behaviour should make the test fail. proc.terminate() status = proc.wait() sys.stdout.write('crash_dump_tester: %s exited with status %s\n' % (crash_service_exe, status)) cleanup_funcs.append(Cleanup) def ListPathsInDir(dir_path): if os.path.exists(dir_path): return [os.path.join(dir_path, name) for name in os.listdir(dir_path)] else: return [] def GetDumpFiles(dumps_dirs): all_files = [filename for dumps_dir in dumps_dirs for filename in ListPathsInDir(dumps_dir)] sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files)) for dump_file in all_files: sys.stdout.write(' %s (size %i)\n' % (dump_file, os.stat(dump_file).st_size)) return [dump_file for dump_file in all_files if dump_file.endswith('.dmp')] def Main(cleanup_funcs): parser = browser_tester.BuildArgParser() parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps', type=int, default=0, help='The number of crash dumps that we should expect') parser.add_option('--expected_process_type_for_crash', dest='expected_process_type_for_crash', type=str, default='nacl-loader', help='The type of Chromium process that we expect the ' 'crash dump to be for') # Ideally we would just query the OS here to find out whether we are # running x86-32 or x86-64 Windows, but Python's win32api module # does not contain a wrapper for GetNativeSystemInfo(), which is # what NaCl uses to check this, or for IsWow64Process(), which is # what Chromium uses. Instead, we just rely on the build system to # tell us. parser.add_option('--win64', dest='win64', action='store_true', help='Pass this if we are running tests for x86-64 Windows') options, args = parser.parse_args() temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_') def CleanUpTempDir(): browsertester.browserlauncher.RemoveDirectory(temp_dir) cleanup_funcs.append(CleanUpTempDir) # To get a guaranteed unique pipe name, use the base name of the # directory we just created. 
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir) # This environment variable enables Breakpad crash dumping in # non-official builds of Chromium. os.environ['CHROME_HEADLESS'] = '1' if sys.platform == 'win32': dumps_dir = temp_dir # Override the default (global) Windows pipe name that Chromium will # use for out-of-process crash reporting. os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name # Launch the x86-32 crash service so that we can handle crashes in # the browser process. StartCrashService(options.browser_path, dumps_dir, windows_pipe_name, cleanup_funcs, 'crash_service.exe') if options.win64: # Launch the x86-64 crash service so that we can handle crashes # in the NaCl loader process (nacl64.exe). # Skip if missing, since in win64 builds crash_service.exe is 64-bit # and crash_service64.exe does not exist. StartCrashService(options.browser_path, dumps_dir, windows_pipe_name, cleanup_funcs, 'crash_service64.exe', skip_if_missing=True) # We add a delay because there is probably a race condition: # crash_service.exe might not have finished doing # CreateNamedPipe() before NaCl does a crash dump and tries to # connect to that pipe. # TODO(mseaborn): We could change crash_service.exe to report when # it has successfully created the named pipe. time.sleep(1) elif sys.platform == 'darwin': dumps_dir = temp_dir os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir elif sys.platform.startswith('linux'): # The "--user-data-dir" option is not effective for the Breakpad # setup in Linux Chromium, because Breakpad is initialized before # "--user-data-dir" is read. So we set HOME to redirect the crash # dumps to a temporary directory. home_dir = temp_dir os.environ['HOME'] = home_dir options.enable_crash_reporter = True result = browser_tester.Run(options.url, options) # Find crash dump results. if sys.platform.startswith('linux'): # Look in "~/.config/*/Crash Reports". This will find crash # reports under ~/.config/chromium or ~/.config/google-chrome, or # under other subdirectories in case the branding is changed. dumps_dirs = [os.path.join(path, 'Crash Reports') for path in ListPathsInDir(os.path.join(home_dir, '.config'))] else: dumps_dirs = [dumps_dir] dmp_files = GetDumpFiles(dumps_dirs) failed = False msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' % (len(dmp_files), options.expected_crash_dumps)) if len(dmp_files) != options.expected_crash_dumps: sys.stdout.write(msg) failed = True for dump_file in dmp_files: # Sanity check: Make sure dumping did not fail after opening the file. msg = 'crash_dump_tester: ERROR: Dump file is empty\n' if os.stat(dump_file).st_size == 0: sys.stdout.write(msg) failed = True # On Windows, the crash dumps should come in pairs of a .dmp and # .txt file. if sys.platform == 'win32': second_file = dump_file[:-4] + '.txt' msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding ' '%r file\n' % (dump_file, second_file)) if not os.path.exists(second_file): sys.stdout.write(msg) failed = True continue # Check that the crash dump comes from the NaCl process. 
dump_info = ReadDumpTxtFile(second_file) if 'ptype' in dump_info: msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n' % (dump_info['ptype'], options.expected_process_type_for_crash)) if dump_info['ptype'] != options.expected_process_type_for_crash: sys.stdout.write(msg) failed = True else: sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n') failed = True # TODO(mseaborn): Ideally we would also check that a backtrace # containing an expected function name can be extracted from the # crash dump. if failed: sys.stdout.write('crash_dump_tester: FAILED\n') result = 1 else: sys.stdout.write('crash_dump_tester: PASSED\n') return result def MainWrapper(): cleanup_funcs = [] try: return Main(cleanup_funcs) finally: for func in cleanup_funcs: func() if __name__ == '__main__': sys.exit(MainWrapper())
bsd-3-clause
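Illustrative sketch (editor's addition) for the crash_dump_tester.py record above: ReadDumpTxtFile() parses the 'key:value' metadata file Breakpad writes next to each .dmp on Windows. The sample values below mirror the comment in that script; the parsing loop is the same logic, inlined so it runs standalone.

# Stand-alone demo of the dump metadata format ReadDumpTxtFile() expects.
sample_lines = [
    'plat:Win32',
    'prod:Chromium',
    'ptype:nacl-loader',
    'rept:crash svc',
]

dump_info = {}
for line in sample_lines:
    if ':' in line:
        key, value = line.rstrip().split(':', 1)
        dump_info[key] = value

assert dump_info['ptype'] == 'nacl-loader'
print(dump_info)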
nanditav/15712-TensorFlow
tensorflow/examples/tutorials/word2vec/word2vec_basic.py
9
9007
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import os import random import zipfile import numpy as np from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf # Step 1: Download the data. url = 'http://mattmahoney.net/dc/' def maybe_download(filename, expected_bytes): """Download a file if not present, and make sure it's the right size.""" if not os.path.exists(filename): filename, _ = urllib.request.urlretrieve(url + filename, filename) statinfo = os.stat(filename) if statinfo.st_size == expected_bytes: print('Found and verified', filename) else: print(statinfo.st_size) raise Exception( 'Failed to verify ' + filename + '. Can you get to it with a browser?') return filename filename = maybe_download('text8.zip', 31344016) # Read the data into a list of strings. def read_data(filename): """Extract the first file enclosed in a zip file as a list of words""" with zipfile.ZipFile(filename) as f: data = tf.compat.as_str(f.read(f.namelist()[0])).split() return data words = read_data(filename) print('Data size', len(words)) # Step 2: Build the dictionary and replace rare words with UNK token. vocabulary_size = 50000 def build_dataset(words): count = [['UNK', -1]] count.extend(collections.Counter(words).most_common(vocabulary_size - 1)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: if word in dictionary: index = dictionary[word] else: index = 0 # dictionary['UNK'] unk_count += 1 data.append(index) count[0][1] = unk_count reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reverse_dictionary data, count, dictionary, reverse_dictionary = build_dataset(words) del words # Hint to reduce memory. print('Most common words (+UNK)', count[:5]) print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]]) data_index = 0 # Step 3: Function to generate a training batch for the skip-gram model. 
def generate_batch(batch_size, num_skips, skip_window): global data_index assert batch_size % num_skips == 0 assert num_skips <= 2 * skip_window batch = np.ndarray(shape=(batch_size), dtype=np.int32) labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32) span = 2 * skip_window + 1 # [ skip_window target skip_window ] buffer = collections.deque(maxlen=span) for _ in range(span): buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) for i in range(batch_size // num_skips): target = skip_window # target label at the center of the buffer targets_to_avoid = [skip_window] for j in range(num_skips): while target in targets_to_avoid: target = random.randint(0, span - 1) targets_to_avoid.append(target) batch[i * num_skips + j] = buffer[skip_window] labels[i * num_skips + j, 0] = buffer[target] buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) return batch, labels batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1) for i in range(8): print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]]) # Step 4: Build and train a skip-gram model. batch_size = 128 embedding_size = 128 # Dimension of the embedding vector. skip_window = 1 # How many words to consider left and right. num_skips = 2 # How many times to reuse an input to generate a label. # We pick a random validation set to sample nearest neighbors. Here we limit the # validation samples to the words that have a low numeric ID, which by # construction are also the most frequent. valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 100 # Only pick dev samples in the head of the distribution. valid_examples = np.random.choice(valid_window, valid_size, replace=False) num_sampled = 64 # Number of negative examples to sample. graph = tf.Graph() with graph.as_default(): # Input data. train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # Ops and variables pinned to the CPU because of missing GPU implementation with tf.device('/cpu:0'): # Look up embeddings for inputs. embeddings = tf.Variable( tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) embed = tf.nn.embedding_lookup(embeddings, train_inputs) # Construct the variables for the NCE loss nce_weights = tf.Variable( tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) nce_biases = tf.Variable(tf.zeros([vocabulary_size])) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels each # time we evaluate the loss. loss = tf.reduce_mean( tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels, num_sampled, vocabulary_size)) # Construct the SGD optimizer using a learning rate of 1.0. optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss) # Compute the cosine similarity between minibatch examples and all embeddings. norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup( normalized_embeddings, valid_dataset) similarity = tf.matmul( valid_embeddings, normalized_embeddings, transpose_b=True) # Add variable initializer. init = tf.global_variables_initializer() # Step 5: Begin training. num_steps = 100001 with tf.Session(graph=graph) as session: # We must initialize all variables before we use them. 
init.run() print("Initialized") average_loss = 0 for step in xrange(num_steps): batch_inputs, batch_labels = generate_batch( batch_size, num_skips, skip_window) feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels} # We perform one update step by evaluating the optimizer op (including it # in the list of returned values for session.run() _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict) average_loss += loss_val if step % 2000 == 0: if step > 0: average_loss /= 2000 # The average loss is an estimate of the loss over the last 2000 batches. print("Average loss at step ", step, ": ", average_loss) average_loss = 0 # Note that this is expensive (~20% slowdown if computed every 500 steps) if step % 10000 == 0: sim = similarity.eval() for i in xrange(valid_size): valid_word = reverse_dictionary[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k + 1] log_str = "Nearest to %s:" % valid_word for k in xrange(top_k): close_word = reverse_dictionary[nearest[k]] log_str = "%s %s," % (log_str, close_word) print(log_str) final_embeddings = normalized_embeddings.eval() # Step 6: Visualize the embeddings. def plot_with_labels(low_dim_embs, labels, filename='tsne.png'): assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings" plt.figure(figsize=(18, 18)) # in inches for i, label in enumerate(labels): x, y = low_dim_embs[i, :] plt.scatter(x, y) plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom') plt.savefig(filename) try: from sklearn.manifold import TSNE import matplotlib.pyplot as plt tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000) plot_only = 500 low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :]) labels = [reverse_dictionary[i] for i in xrange(plot_only)] plot_with_labels(low_dim_embs, labels) except ImportError: print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
apache-2.0
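Illustrative sketch (editor's addition) for the word2vec_basic.py record above: generate_batch() slides a window of span = 2 * skip_window + 1 over the word-id stream and emits num_skips (center, context) pairs per center word. The toy version below reproduces that pairing in plain Python, without TensorFlow, so the output can be inspected directly; the word-id stream is made up.

# Toy skip-gram pair generation, mirroring generate_batch() above.
import collections
import random

data = [0, 1, 2, 3, 4, 5, 6, 7]   # toy word-id stream
skip_window, num_skips = 1, 2
span = 2 * skip_window + 1        # [ skip_window target skip_window ]

pairs = []
buffer = collections.deque(data[:span], maxlen=span)
for i in range(span, len(data) + 1):
    target = skip_window          # start at the center of the buffer
    avoid = [skip_window]
    for _ in range(num_skips):
        while target in avoid:
            target = random.randint(0, span - 1)
        avoid.append(target)
        pairs.append((buffer[skip_window], buffer[target]))
    if i < len(data):
        buffer.append(data[i])    # slide the window by one word

print(pairs)   # e.g. [(1, 0), (1, 2), (2, 3), (2, 1), ...]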
manipopopo/tensorflow
tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py
5
12089
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TFGAN's estimator.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import tempfile from absl.testing import parameterized import numpy as np import six from tensorflow.contrib import layers from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses from tensorflow.contrib.learn.python.learn.learn_io import graph_io from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.estimator.inputs import numpy_io from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import metrics as metrics_lib from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import input as input_lib from tensorflow.python.training import learning_rate_decay from tensorflow.python.training import training from tensorflow.python.training import training_util def generator_fn(noise_dict, mode): del mode noise = noise_dict['x'] return layers.fully_connected(noise, noise.shape[1].value) def discriminator_fn(data, unused_conditioning, mode): del unused_conditioning, mode return layers.fully_connected(data, 1) class GetGANModelTest(test.TestCase, parameterized.TestCase): """Tests that `GetGANModel` produces the correct model.""" @parameterized.named_parameters( ('train', model_fn_lib.ModeKeys.TRAIN), ('eval', model_fn_lib.ModeKeys.EVAL), ('predict', model_fn_lib.ModeKeys.PREDICT)) def test_get_gan_model(self, mode): with ops.Graph().as_default(): generator_inputs = {'x': array_ops.ones([3, 4])} real_data = (array_ops.zeros([3, 4]) if mode != model_fn_lib.ModeKeys.PREDICT else None) gan_model = estimator._get_gan_model( mode, generator_fn, discriminator_fn, real_data, generator_inputs, add_summaries=False) self.assertEqual(generator_inputs, gan_model.generator_inputs) self.assertIsNotNone(gan_model.generated_data) self.assertEqual(2, len(gan_model.generator_variables)) # 1 FC layer self.assertIsNotNone(gan_model.generator_fn) if mode == model_fn_lib.ModeKeys.PREDICT: self.assertIsNone(gan_model.real_data) self.assertIsNone(gan_model.discriminator_real_outputs) self.assertIsNone(gan_model.discriminator_gen_outputs) self.assertIsNone(gan_model.discriminator_variables) self.assertIsNone(gan_model.discriminator_scope) 
self.assertIsNone(gan_model.discriminator_fn) else: self.assertIsNotNone(gan_model.real_data) self.assertIsNotNone(gan_model.discriminator_real_outputs) self.assertIsNotNone(gan_model.discriminator_gen_outputs) self.assertEqual(2, len(gan_model.discriminator_variables)) # 1 FC layer self.assertIsNotNone(gan_model.discriminator_scope) self.assertIsNotNone(gan_model.discriminator_fn) def get_dummy_gan_model(): # TODO(joelshor): Find a better way of creating a variable scope. with variable_scope.variable_scope('generator') as gen_scope: gen_var = variable_scope.get_variable('dummy_var', initializer=0.0) with variable_scope.variable_scope('discriminator') as dis_scope: dis_var = variable_scope.get_variable('dummy_var', initializer=0.0) return tfgan_tuples.GANModel( generator_inputs=None, generated_data=array_ops.ones([3, 4]), generator_variables=[gen_var], generator_scope=gen_scope, generator_fn=None, real_data=array_ops.zeros([3, 4]), discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var, discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var, discriminator_variables=[dis_var], discriminator_scope=dis_scope, discriminator_fn=None) def dummy_loss_fn(gan_model): return math_ops.reduce_sum(gan_model.discriminator_real_outputs - gan_model.discriminator_gen_outputs) def get_metrics(gan_model): return { 'mse_custom_metric': metrics_lib.mean_squared_error( gan_model.real_data, gan_model.generated_data) } class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase): """Tests that the EstimatorSpec is constructed appropriately.""" @classmethod def setUpClass(cls): cls._generator_optimizer = training.GradientDescentOptimizer(1.0) cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0) @parameterized.named_parameters( ('train', model_fn_lib.ModeKeys.TRAIN), ('eval', model_fn_lib.ModeKeys.EVAL), ('predict', model_fn_lib.ModeKeys.PREDICT)) def test_get_estimator_spec(self, mode): with ops.Graph().as_default(): self._gan_model = get_dummy_gan_model() spec = estimator._get_estimator_spec( mode, self._gan_model, generator_loss_fn=dummy_loss_fn, discriminator_loss_fn=dummy_loss_fn, get_eval_metric_ops_fn=get_metrics, generator_optimizer=self._generator_optimizer, discriminator_optimizer=self._discriminator_optimizer) self.assertEqual(mode, spec.mode) if mode == model_fn_lib.ModeKeys.PREDICT: self.assertEqual(self._gan_model.generated_data, spec.predictions) elif mode == model_fn_lib.ModeKeys.TRAIN: self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar self.assertIsNotNone(spec.train_op) self.assertIsNotNone(spec.training_hooks) elif mode == model_fn_lib.ModeKeys.EVAL: self.assertEqual(self._gan_model.generated_data, spec.predictions) self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar self.assertIsNotNone(spec.eval_metric_ops) # TODO(joelshor): Add pandas test. 
class GANEstimatorIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _test_complete_flow( self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size, lr_decay=False): def make_opt(): gstep = training_util.get_or_create_global_step() lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9) return training.GradientDescentOptimizer(lr) gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0) dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0) est = estimator.GANEstimator( generator_fn=generator_fn, discriminator_fn=discriminator_fn, generator_loss_fn=losses.wasserstein_generator_loss, discriminator_loss_fn=losses.wasserstein_discriminator_loss, generator_optimizer=gopt, discriminator_optimizer=dopt, get_eval_metric_ops_fn=get_metrics, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', six.iterkeys(scores)) self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'], scores['loss']) self.assertIn('mse_custom_metric', six.iterkeys(scores)) # PREDICT predictions = np.array([x for x in est.predict(predict_input_fn)]) self.assertAllEqual(prediction_size, predictions.shape) def test_numpy_input_fn(self): """Tests complete flow with numpy_input_fn.""" input_dim = 4 batch_size = 5 data = np.zeros([batch_size, input_dim]) train_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, prediction_size=[batch_size, input_dim]) def test_numpy_input_fn_lrdecay(self): """Tests complete flow with numpy_input_fn.""" input_dim = 4 batch_size = 5 data = np.zeros([batch_size, input_dim]) train_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, prediction_size=[batch_size, input_dim], lr_decay=True) def test_input_fn_from_parse_example(self): """Tests complete flow with input_fn constructed from parse_example.""" input_dim = 4 batch_size = 6 data = np.zeros([batch_size, input_dim]) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), 'y': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32), 'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32), } def _train_input_fn(): feature_map = parsing_ops.parse_example( serialized_examples, feature_spec) _, features = 
graph_io.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) _, features = graph_io.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) _, features = graph_io.queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, prediction_size=[batch_size, input_dim]) if __name__ == '__main__': test.main()
apache-2.0