| Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261) |
|---|---|---|---|
7,500
|
@classmethod
def parseInvstmtrs(cls_, invstmtrs_list):
ret = []
for invstmtrs_ofx in invstmtrs_list:
account = InvestmentAccount()
acctid_tag = invstmtrs_ofx.find('acctid')
if (hasattr(acctid_tag, 'contents')):
try:
account.account_id = acctid_tag.contents[0].strip()
except IndexError:
account.warnings.append(
six.u("Empty acctid tag for %s") % invstmtrs_ofx)
if cls_.fail_fast:
raise
brokerid_tag = invstmtrs_ofx.find('brokerid')
if (hasattr(brokerid_tag, 'contents')):
try:
account.brokerid = brokerid_tag.contents[0].strip()
except __HOLE__:
account.warnings.append(
six.u("Empty brokerid tag for %s") % invstmtrs_ofx)
if cls_.fail_fast:
raise
account.type = AccountType.Investment
if (invstmtrs_ofx):
account.statement = cls_.parseInvestmentStatement(
invstmtrs_ofx)
ret.append(account)
return ret
|
IndexError
|
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/OfxParser.parseInvstmtrs
|
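A minimal sketch of why `IndexError` fills the masked handler in row 7,500 above: ofxparse reads tags with BeautifulSoup, and an empty tag has an empty `.contents` list, so indexing it fails. The markup string below is a made-up example, assuming a bs4-style parser.

```python
from bs4 import BeautifulSoup  # assumption: bs4, which ofxparse uses for parsing

acctid_tag = BeautifulSoup("<acctid></acctid>", "html.parser").find("acctid")
try:
    account_id = acctid_tag.contents[0].strip()  # empty tag -> contents == []
except IndexError:
    print("Empty acctid tag raises IndexError, matching the row's label")
```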
7,501
|
@classmethod
def parseSeclist(cls_, seclist_ofx):
securityList = []
for secinfo_ofx in seclist_ofx.findAll('secinfo'):
uniqueid_tag = secinfo_ofx.find('uniqueid')
name_tag = secinfo_ofx.find('secname')
ticker_tag = secinfo_ofx.find('ticker')
memo_tag = secinfo_ofx.find('memo')
if uniqueid_tag and name_tag:
try:
ticker = ticker_tag.contents[0].strip()
except AttributeError:
# ticker can be empty
ticker = None
try:
memo = memo_tag.contents[0].strip()
except __HOLE__:
# memo can be empty
memo = None
securityList.append(
Security(uniqueid_tag.contents[0].strip(),
name_tag.contents[0].strip(),
ticker,
memo))
return securityList
|
AttributeError
|
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/OfxParser.parseSeclist
|
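Row 7,501 masks an `AttributeError` rather than an `IndexError` because the memo tag is looked up with `find()` but never checked: a missing tag comes back as `None`, and `None` has no `.contents`. A minimal sketch:

```python
memo_tag = None  # what soup.find('memo') returns when the tag is absent
try:
    memo = memo_tag.contents[0].strip()
except AttributeError:
    memo = None  # memo can be empty
print(memo)
```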
7,502
|
@classmethod
def parseInvestmentPosition(cls_, ofx):
position = Position()
tag = ofx.find('uniqueid')
if (hasattr(tag, 'contents')):
position.security = tag.contents[0].strip()
tag = ofx.find('units')
if (hasattr(tag, 'contents')):
position.units = cls_.toDecimal(tag)
tag = ofx.find('unitprice')
if (hasattr(tag, 'contents')):
position.unit_price = cls_.toDecimal(tag)
tag = ofx.find('dtpriceasof')
if (hasattr(tag, 'contents')):
try:
position.date = cls_.parseOfxDateTime(tag.contents[0].strip())
except __HOLE__:
raise
return position
|
ValueError
|
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/OfxParser.parseInvestmentPosition
|
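The masked handler in row 7,502 re-raises a `ValueError` from `parseOfxDateTime`. Assuming that helper ultimately relies on strptime-style parsing, a malformed date string fails like this (the format string is illustrative):

```python
from datetime import datetime

try:
    datetime.strptime("not-a-date", "%Y%m%d%H%M%S")  # hypothetical bad OFX timestamp
except ValueError as exc:
    print("invalid date:", exc)
```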
7,503
|
@classmethod
def parseInvestmentTransaction(cls_, ofx):
transaction = InvestmentTransaction(ofx.name)
tag = ofx.find('fitid')
if (hasattr(tag, 'contents')):
transaction.id = tag.contents[0].strip()
tag = ofx.find('memo')
if (hasattr(tag, 'contents')):
transaction.memo = tag.contents[0].strip()
tag = ofx.find('dttrade')
if (hasattr(tag, 'contents')):
try:
transaction.tradeDate = cls_.parseOfxDateTime(
tag.contents[0].strip())
except __HOLE__:
raise
tag = ofx.find('dtsettle')
if (hasattr(tag, 'contents')):
try:
transaction.settleDate = cls_.parseOfxDateTime(
tag.contents[0].strip())
except ValueError:
raise
tag = ofx.find('uniqueid')
if (hasattr(tag, 'contents')):
transaction.security = tag.contents[0].strip()
tag = ofx.find('incometype')
if (hasattr(tag, 'contents')):
transaction.income_type = tag.contents[0].strip()
tag = ofx.find('units')
if (hasattr(tag, 'contents')):
transaction.units = cls_.toDecimal(tag)
tag = ofx.find('unitprice')
if (hasattr(tag, 'contents')):
transaction.unit_price = cls_.toDecimal(tag)
tag = ofx.find('commission')
if (hasattr(tag, 'contents')):
transaction.commission = cls_.toDecimal(tag)
tag = ofx.find('fees')
if (hasattr(tag, 'contents')):
transaction.fees = cls_.toDecimal(tag)
tag = ofx.find('total')
if (hasattr(tag, 'contents')):
transaction.total = cls_.toDecimal(tag)
tag = ofx.find('inv401ksource')
if (hasattr(tag, 'contents')):
transaction.inv401ksource = tag.contents[0].strip()
return transaction
|
ValueError
|
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/OfxParser.parseInvestmentTransaction
|
7,504
|
@classmethod
def parseInvestmentStatement(cls_, invstmtrs_ofx):
statement = InvestmentStatement()
currency_tag = invstmtrs_ofx.find('curdef')
if hasattr(currency_tag, "contents"):
statement.currency = currency_tag.contents[0].strip().lower()
invtranlist_ofx = invstmtrs_ofx.find('invtranlist')
if (invtranlist_ofx is not None):
tag = invtranlist_ofx.find('dtstart')
if (hasattr(tag, 'contents')):
try:
statement.start_date = cls_.parseOfxDateTime(
tag.contents[0].strip())
except IndexError:
statement.warnings.append(six.u('Empty start date.'))
if cls_.fail_fast:
raise
except __HOLE__:
e = sys.exc_info()[1]
statement.warnings.append(six.u('Invalid start date:\
%s') % e)
if cls_.fail_fast:
raise
tag = invtranlist_ofx.find('dtend')
if (hasattr(tag, 'contents')):
try:
statement.end_date = cls_.parseOfxDateTime(
tag.contents[0].strip())
except IndexError:
statement.warnings.append(six.u('Empty end date.'))
except ValueError:
e = sys.exc_info()[1]
statement.warnings.append(six.u('Invalid end date: \
%s') % e)
if cls_.fail_fast:
raise
for transaction_type in ['posmf', 'posstock', 'posopt']:
try:
for investment_ofx in invstmtrs_ofx.findAll(transaction_type):
statement.positions.append(
cls_.parseInvestmentPosition(investment_ofx))
except (ValueError, IndexError, decimal.InvalidOperation,
TypeError):
e = sys.exc_info()[1]
if cls_.fail_fast:
raise
statement.discarded_entries.append(
{six.u('error'): six.u("Error parsing positions: \
") + str(e), six.u('content'): investment_ofx}
)
for transaction_type in InvestmentTransaction.AGGREGATE_TYPES:
try:
for investment_ofx in invstmtrs_ofx.findAll(transaction_type):
statement.transactions.append(
cls_.parseInvestmentTransaction(investment_ofx))
except (ValueError, IndexError, decimal.InvalidOperation):
e = sys.exc_info()[1]
if cls_.fail_fast:
raise
statement.discarded_entries.append(
{six.u('error'): transaction_type + ": " + str(e),
six.u('content'): investment_ofx}
)
return statement
|
ValueError
|
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/OfxParser.parseInvestmentStatement
|
7,505
|
@classmethod
def parseBalance(cls_, statement, stmt_ofx, bal_tag_name, bal_attr,
bal_date_attr, bal_type_string):
bal_tag = stmt_ofx.find(bal_tag_name)
if hasattr(bal_tag, "contents"):
balamt_tag = bal_tag.find('balamt')
dtasof_tag = bal_tag.find('dtasof')
if hasattr(balamt_tag, "contents"):
try:
setattr(statement, bal_attr, cls_.toDecimal(balamt_tag))
except (IndexError, decimal.InvalidOperation):
ex = sys.exc_info()[1]
statement.warnings.append(
six.u("%s balance amount was empty for \
%s") % (bal_type_string, stmt_ofx))
if cls_.fail_fast:
raise OfxParserException("Empty %s balance\
" % bal_type_string)
if hasattr(dtasof_tag, "contents"):
try:
setattr(statement, bal_date_attr, cls_.parseOfxDateTime(
dtasof_tag.contents[0].strip()))
except __HOLE__:
statement.warnings.append(
six.u("%s balance date was empty for %s\
") % (bal_type_string, stmt_ofx))
if cls_.fail_fast:
raise
except ValueError:
statement.warnings.append(
six.u("%s balance date was not allowed for \
%s") % (bal_type_string, stmt_ofx))
if cls_.fail_fast:
raise
|
IndexError
|
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/OfxParser.parseBalance
|
7,506
|
@classmethod
def parseStatement(cls_, stmt_ofx):
'''
Parse a statement in ofx-land and return a Statement object.
'''
statement = Statement()
dtstart_tag = stmt_ofx.find('dtstart')
if hasattr(dtstart_tag, "contents"):
try:
statement.start_date = cls_.parseOfxDateTime(
dtstart_tag.contents[0].strip())
except __HOLE__:
statement.warnings.append(
six.u("Statement start date was empty for %s") % stmt_ofx)
if cls_.fail_fast:
raise
except ValueError:
statement.warnings.append(
six.u("Statement start date was not allowed for \
%s") % stmt_ofx)
if cls_.fail_fast:
raise
dtend_tag = stmt_ofx.find('dtend')
if hasattr(dtend_tag, "contents"):
try:
statement.end_date = cls_.parseOfxDateTime(
dtend_tag.contents[0].strip())
except IndexError:
statement.warnings.append(
six.u("Statement start date was empty for %s") % stmt_ofx)
if cls_.fail_fast:
raise
except ValueError:
ve = sys.exc_info()[1]
msg = six.u("Statement start date was not formatted "
"correctly for %s")
statement.warnings.append(msg % stmt_ofx)
if cls_.fail_fast:
raise
except TypeError:
statement.warnings.append(
six.u("Statement start date was not allowed for \
%s") % stmt_ofx)
if cls_.fail_fast:
raise
currency_tag = stmt_ofx.find('curdef')
if hasattr(currency_tag, "contents"):
try:
statement.currency = currency_tag.contents[0].strip().lower()
except IndexError:
statement.warnings.append(
six.u("Currency definition was empty for %s") % stmt_ofx)
if cls_.fail_fast:
raise
cls_.parseBalance(statement, stmt_ofx, 'ledgerbal',
'balance', 'balance_date', 'ledger')
cls_.parseBalance(statement, stmt_ofx, 'availbal', 'available_balance',
'available_balance_date', 'ledger')
for transaction_ofx in stmt_ofx.findAll('stmttrn'):
try:
statement.transactions.append(
cls_.parseTransaction(transaction_ofx))
except OfxParserException:
ofxError = sys.exc_info()[1]
statement.discarded_entries.append(
{'error': str(ofxError), 'content': transaction_ofx})
if cls_.fail_fast:
raise
return statement
|
IndexError
|
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/OfxParser.parseStatement
|
7,507
|
@classmethod
def parseTransaction(cls_, txn_ofx):
'''
Parse a transaction in ofx-land and return a Transaction object.
'''
transaction = Transaction()
type_tag = txn_ofx.find('trntype')
if hasattr(type_tag, 'contents'):
try:
transaction.type = type_tag.contents[0].lower().strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction type"))
except TypeError:
raise OfxParserException(
six.u("No Transaction type (a required field)"))
name_tag = txn_ofx.find('name')
if hasattr(name_tag, "contents"):
try:
transaction.payee = name_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction name"))
except TypeError:
raise OfxParserException(
six.u("No Transaction name (a required field)"))
memo_tag = txn_ofx.find('memo')
if hasattr(memo_tag, "contents"):
try:
transaction.memo = memo_tag.contents[0].strip()
except IndexError:
# Memo can be empty.
pass
except TypeError:
pass
amt_tag = txn_ofx.find('trnamt')
if hasattr(amt_tag, "contents"):
try:
transaction.amount = cls_.toDecimal(amt_tag)
except IndexError:
raise OfxParserException("Invalid Transaction Date")
except decimal.InvalidOperation:
# Some banks use a null transaction for including interest
# rate changes on your statement.
if amt_tag.contents[0].strip() in ('null', '-null'):
transaction.amount = 0
else:
raise OfxParserException(
six.u("Invalid Transaction Amount: '%s'") % amt_tag.contents[0])
except TypeError:
raise OfxParserException(
six.u("No Transaction Amount (a required field)"))
else:
raise OfxParserException(
six.u("Missing Transaction Amount (a required field)"))
date_tag = txn_ofx.find('dtposted')
if hasattr(date_tag, "contents"):
try:
transaction.date = cls_.parseOfxDateTime(
date_tag.contents[0].strip())
except __HOLE__:
raise OfxParserException("Invalid Transaction Date")
except ValueError:
ve = sys.exc_info()[1]
raise OfxParserException(str(ve))
except TypeError:
raise OfxParserException(
six.u("No Transaction Date (a required field)"))
else:
raise OfxParserException(
six.u("Missing Transaction Date (a required field)"))
id_tag = txn_ofx.find('fitid')
if hasattr(id_tag, "contents"):
try:
transaction.id = id_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty FIT id (a required \
field)"))
except TypeError:
raise OfxParserException(six.u("No FIT id (a required field)"))
else:
raise OfxParserException(six.u("Missing FIT id (a required \
field)"))
sic_tag = txn_ofx.find('sic')
if hasattr(sic_tag, 'contents'):
try:
transaction.sic = sic_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction Standard \
Industry Code (SIC)"))
if transaction.sic is not None and transaction.sic in mcc.codes:
try:
transaction.mcc = mcc.codes.get(transaction.sic, '').get('combined \
description')
except IndexError:
raise OfxParserException(six.u("Empty transaction Merchant Category \
Code (MCC)"))
except AttributeError:
if cls_.fail_fast:
raise
checknum_tag = txn_ofx.find('checknum')
if hasattr(checknum_tag, 'contents'):
try:
transaction.checknum = checknum_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty Check (or other reference) \
number"))
return transaction
|
IndexError
|
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/OfxParser.parseTransaction
|
7,508
|
def handle(self, *args, **options):
try:
import pycountry
except __HOLE__:
raise CommandError(
"You are missing the pycountry library. Install it with "
"'pip install pycountry'")
if Country.objects.exists():
if options.get('is_initial_only', False):
# exit quietly, as the initial load already seems to have happened.
self.stdout.write("Countries already populated; nothing to be done.")
sys.exit(0)
else:
raise CommandError(
"You already have countries in your database. This command "
"currently does not support updating existing countries.")
countries = [
Country(
iso_3166_1_a2=country.alpha2,
iso_3166_1_a3=country.alpha3,
iso_3166_1_numeric=country.numeric,
printable_name=country.name,
name=getattr(country, 'official_name', ''),
is_shipping_country=options['is_shipping'])
for country in pycountry.countries]
Country.objects.bulk_create(countries)
self.stdout.write("Successfully added %s countries." % len(countries))
|
ImportError
|
dataset/ETHPy150Open django-oscar/django-oscar/src/oscar/management/commands/oscar_populate_countries.py/Command.handle
|
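Row 7,508 is the optional-dependency pattern: the `import` statement itself raises `ImportError` when the package is missing, which the command converts into a user-facing error. A self-contained sketch with a deliberately bogus module name:

```python
try:
    import definitely_not_installed_module  # hypothetical missing dependency
except ImportError as exc:
    print("You are missing a library. Install it first:", exc)
```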
7,509
|
def spa_c(time, latitude, longitude, pressure=101325, altitude=0,
temperature=12, delta_t=67.0,
raw_spa_output=False):
"""
Calculate the solar position using the C implementation of the NREL
SPA code
The source files for this code are located in './spa_c_files/', along with
a README file which describes how the C code is wrapped in Python.
Due to license restrictions, the C code must be downloaded separately
and used in accordance with its license.
Parameters
----------
time : pandas.DatetimeIndex
Localized or UTC.
latitude : float
longitude : float
pressure : float
Pressure in Pascals
altitude : float
Elevation above sea level.
temperature : float
Temperature in C
delta_t : float
Difference between terrestrial time and UT1.
USNO has previous values and predictions.
raw_spa_output : bool
If true, returns the raw SPA output.
Returns
-------
DataFrame
The DataFrame will have the following columns:
elevation,
azimuth,
zenith,
apparent_elevation,
apparent_zenith.
References
----------
NREL SPA code: http://rredc.nrel.gov/solar/codesandalgorithms/spa/
USNO delta T: http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_python, ephemeris
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Edited by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
try:
from pvlib.spa_c_files.spa_py import spa_calc
except __HOLE__:
raise ImportError('Could not import built-in SPA calculator. ' +
'You may need to recompile the SPA code.')
pvl_logger.debug('using built-in spa code to calculate solar position')
time_utc = time
spa_out = []
for date in time_utc:
spa_out.append(spa_calc(year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second,
timezone=0, # must input localized or utc times
latitude=latitude,
longitude=longitude,
elevation=altitude,
pressure=pressure / 100,
temperature=temperature,
delta_t=delta_t
))
spa_df = pd.DataFrame(spa_out, index=time_utc)
if raw_spa_output:
return spa_df
else:
dfout = pd.DataFrame({'azimuth': spa_df['azimuth'],
'apparent_zenith': spa_df['zenith'],
'apparent_elevation': spa_df['e'],
'elevation': spa_df['e0'],
'zenith': 90 - spa_df['e0']})
return dfout
|
ImportError
|
dataset/ETHPy150Open pvlib/pvlib-python/pvlib/solarposition.py/spa_c
|
7,510
|
def spa_python(time, latitude, longitude,
altitude=0, pressure=101325, temperature=12, delta_t=None,
atmos_refract=None, how='numpy', numthreads=4):
"""
Calculate the solar position using a python implementation of the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
time : pandas.DatetimeIndex
Localized or UTC.
latitude : float
longitude : float
altitude : float
pressure : int or float, optional
avg. yearly air pressure in Pascals.
temperature : int or float, optional
avg. yearly air temperature in degrees C.
delta_t : float, optional
Difference between terrestrial time and UT1.
The USNO has historical and forecasted delta_t [3].
atmos_refract : float, optional
The approximate atmospheric refraction (in degrees)
at sunrise and sunset.
how : str, optional
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
numthreads : int, optional
Number of threads to use if how == 'numba'.
Returns
-------
DataFrame
The DataFrame will have the following columns:
apparent_zenith (degrees),
zenith (degrees),
apparent_elevation (degrees),
elevation (degrees),
azimuth (degrees),
equation_of_time (minutes).
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar
radiation applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
[3] USNO delta T: http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_c, ephemeris
"""
# Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
pvl_logger.debug('Calculating solar position with spa_python code')
lat = latitude
lon = longitude
elev = altitude
pressure = pressure / 100 # pressure must be in millibars for calculation
delta_t = delta_t or 67.0
atmos_refract = atmos_refract or 0.5667
if not isinstance(time, pd.DatetimeIndex):
try:
time = pd.DatetimeIndex(time)
except (__HOLE__, ValueError):
time = pd.DatetimeIndex([time, ])
unixtime = time.astype(np.int64)/10**9
spa = _spa_python_import(how)
app_zenith, zenith, app_elevation, elevation, azimuth, eot = spa.solar_position(
unixtime, lat, lon, elev, pressure, temperature, delta_t,
atmos_refract, numthreads)
result = pd.DataFrame({'apparent_zenith': app_zenith, 'zenith': zenith,
'apparent_elevation': app_elevation,
'elevation': elevation, 'azimuth': azimuth,
'equation_of_time': eot},
index=time)
return result
|
TypeError
|
dataset/ETHPy150Open pvlib/pvlib-python/pvlib/solarposition.py/spa_python
|
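In row 7,510 the masked exception comes from handing a scalar timestamp to `pd.DatetimeIndex`, which only accepts collections; depending on the pandas version this raises `TypeError` or `ValueError`, and the tuple in the `except` catches both. A sketch:

```python
import pandas as pd

time = pd.Timestamp("2015-06-01 12:00")  # a scalar, not a collection
try:
    index = pd.DatetimeIndex(time)
except (TypeError, ValueError):
    index = pd.DatetimeIndex([time])  # wrap the scalar, as spa_python does
print(index)
```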
7,511
|
def get_sun_rise_set_transit(time, latitude, longitude, how='numpy',
delta_t=None,
numthreads=4):
"""
Calculate the sunrise, sunset, and sun transit times using the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
time : pandas.DatetimeIndex
Only the date part is used
latitude : float
longitude : float
delta_t : float, optional
Difference between terrestrial time and UT1.
By default, use USNO historical data and predictions
how : str, optional
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
numthreads : int, optional
Number of threads to use if how == 'numba'.
Returns
-------
DataFrame
The DataFrame will have the following columns:
sunrise, sunset, transit
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov.
"""
# Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
pvl_logger.debug('Calculating sunrise, set, transit with spa_python code')
lat = latitude
lon = longitude
delta_t = delta_t or 67.0
if not isinstance(time, pd.DatetimeIndex):
try:
time = pd.DatetimeIndex(time)
except (__HOLE__, ValueError):
time = pd.DatetimeIndex([time, ])
# must convert to midnight UTC on day of interest
utcday = pd.DatetimeIndex(time.date).tz_localize('UTC')
unixtime = utcday.astype(np.int64)/10**9
spa = _spa_python_import(how)
transit, sunrise, sunset = spa.transit_sunrise_sunset(
unixtime, lat, lon, delta_t, numthreads)
# arrays are in seconds since epoch format, need to convert to timestamps
transit = pd.to_datetime(transit, unit='s', utc=True).tz_convert(
time.tz).tolist()
sunrise = pd.to_datetime(sunrise, unit='s', utc=True).tz_convert(
time.tz).tolist()
sunset = pd.to_datetime(sunset, unit='s', utc=True).tz_convert(
time.tz).tolist()
result = pd.DataFrame({'transit': transit,
'sunrise': sunrise,
'sunset': sunset}, index=time)
return result
|
TypeError
|
dataset/ETHPy150Open pvlib/pvlib-python/pvlib/solarposition.py/get_sun_rise_set_transit
|
7,512
|
def pyephem(time, latitude, longitude, altitude=0, pressure=101325,
temperature=12):
"""
Calculate the solar position using the PyEphem package.
Parameters
----------
time : pandas.DatetimeIndex
Localized or UTC.
latitude : float
longitude : float
altitude : float
distance above sea level.
pressure : int or float, optional
air pressure in Pascals.
temperature : int or float, optional
air temperature in degrees C.
Returns
-------
DataFrame
The DataFrame will have the following columns:
apparent_elevation, elevation,
apparent_azimuth, azimuth,
apparent_zenith, zenith.
See also
--------
spa_python, spa_c, ephemeris
"""
# Written by Will Holmgren (@wholmgren), University of Arizona, 2014
try:
import ephem
except ImportError:
raise ImportError('PyEphem must be installed')
pvl_logger.debug('using PyEphem to calculate solar position')
# if localized, convert to UTC. otherwise, assume UTC.
try:
time_utc = time.tz_convert('UTC')
except __HOLE__:
time_utc = time
sun_coords = pd.DataFrame(index=time)
obs, sun = _ephem_setup(latitude, longitude, altitude,
pressure, temperature)
# make and fill lists of the sun's altitude and azimuth
# this is the pressure and temperature corrected apparent alt/az.
alts = []
azis = []
for thetime in time_utc:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['apparent_elevation'] = alts
sun_coords['apparent_azimuth'] = azis
# redo it for p=0 to get no atmosphere alt/az
obs.pressure = 0
alts = []
azis = []
for thetime in time_utc:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['elevation'] = alts
sun_coords['azimuth'] = azis
# convert to degrees. add zenith
sun_coords = np.rad2deg(sun_coords)
sun_coords['apparent_zenith'] = 90 - sun_coords['apparent_elevation']
sun_coords['zenith'] = 90 - sun_coords['elevation']
return sun_coords
|
TypeError
|
dataset/ETHPy150Open pvlib/pvlib-python/pvlib/solarposition.py/pyephem
|
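Row 7,512 uses the usual tz-naive fallback: calling `tz_convert('UTC')` on a naive `DatetimeIndex` raises `TypeError`, so the code assumes the input is already UTC. A minimal sketch:

```python
import pandas as pd

time = pd.DatetimeIndex(["2015-06-01 12:00"])  # tz-naive
try:
    time_utc = time.tz_convert("UTC")
except TypeError:
    time_utc = time  # no timezone attached; treat as UTC
print(time_utc)
```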
7,513
|
def ephemeris(time, latitude, longitude, pressure=101325, temperature=12):
"""
Python-native solar position calculator.
The accuracy of this code is not guaranteed.
Consider using the built-in spa_c code or the PyEphem library.
Parameters
----------
time : pandas.DatetimeIndex
latitude : float
longitude : float
pressure : float or Series
Ambient pressure (Pascals)
temperature : float or Series
Ambient temperature (C)
Returns
-------
DataFrame with the following columns:
* apparent_elevation : apparent sun elevation accounting for
atmospheric refraction.
* elevation : actual elevation (not accounting for refraction)
of the sun in decimal degrees, 0 = on horizon.
The complement of the zenith angle.
* azimuth : Azimuth of the sun in decimal degrees East of North.
This is the complement of the apparent zenith angle.
* apparent_zenith : apparent sun zenith accounting for atmospheric
refraction.
* zenith : Solar zenith angle
* solar_time : Solar time in decimal hours (solar noon is 12.00).
References
-----------
Grover Hughes' class and related class materials on Engineering
Astronomy at Sandia National Laboratories, 1985.
See also
--------
pyephem, spa_c, spa_python
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Most comments in this function are from PVLIB_MATLAB or from
# pvlib-python's attempt to understand and fix problems with the
# algorithm. The comments are *not* based on the reference material.
# This helps a little bit:
# http://www.cv.nrao.edu/~rfisher/Ephemerides/times.html
# the inversion of longitude is due to the fact that this code was
# originally written for the convention that positive longitude were for
# locations west of the prime meridian. However, the correct convention (as
# of 2009) is to use negative longitudes for locations west of the prime
# meridian. Therefore, the user should input longitude values under the
# correct convention (e.g. Albuquerque is at -106 longitude), but it needs
# to be inverted for use in the code.
Latitude = latitude
Longitude = -1 * longitude
Abber = 20 / 3600.
LatR = np.radians(Latitude)
# the SPA algorithm needs time to be expressed in terms of
# decimal UTC hours of the day of the year.
# if localized, convert to UTC. otherwise, assume UTC.
try:
time_utc = time.tz_convert('UTC')
except __HOLE__:
time_utc = time
# strip out the day of the year and calculate the decimal hour
DayOfYear = time_utc.dayofyear
DecHours = (time_utc.hour + time_utc.minute/60. + time_utc.second/3600. +
time_utc.microsecond/3600.e6)
UnivDate = DayOfYear
UnivHr = DecHours
Yr = time_utc.year - 1900
YrBegin = 365 * Yr + np.floor((Yr - 1) / 4.) - 0.5
Ezero = YrBegin + UnivDate
T = Ezero / 36525.
# Calculate Greenwich Mean Sidereal Time (GMST)
GMST0 = 6 / 24. + 38 / 1440. + (
45.836 + 8640184.542 * T + 0.0929 * T ** 2) / 86400.
GMST0 = 360 * (GMST0 - np.floor(GMST0))
GMSTi = np.mod(GMST0 + 360 * (1.0027379093 * UnivHr / 24.), 360)
# Local apparent sidereal time
LocAST = np.mod((360 + GMSTi - Longitude), 360)
EpochDate = Ezero + UnivHr / 24.
T1 = EpochDate / 36525.
ObliquityR = np.radians(
23.452294 - 0.0130125 * T1 - 1.64e-06 * T1 ** 2 + 5.03e-07 * T1 ** 3)
MlPerigee = 281.22083 + 4.70684e-05 * EpochDate + 0.000453 * T1 ** 2 + (
3e-06 * T1 ** 3)
MeanAnom = np.mod((358.47583 + 0.985600267 * EpochDate - 0.00015 *
T1 ** 2 - 3e-06 * T1 ** 3), 360)
Eccen = 0.01675104 - 4.18e-05 * T1 - 1.26e-07 * T1 ** 2
EccenAnom = MeanAnom
E = 0
while np.max(abs(EccenAnom - E)) > 0.0001:
E = EccenAnom
EccenAnom = MeanAnom + np.degrees(Eccen)*np.sin(np.radians(E))
TrueAnom = (
2 * np.mod(np.degrees(np.arctan2(((1 + Eccen) / (1 - Eccen)) ** 0.5 *
np.tan(np.radians(EccenAnom) / 2.), 1)), 360))
EcLon = np.mod(MlPerigee + TrueAnom, 360) - Abber
EcLonR = np.radians(EcLon)
DecR = np.arcsin(np.sin(ObliquityR)*np.sin(EcLonR))
RtAscen = np.degrees(np.arctan2(np.cos(ObliquityR)*np.sin(EcLonR),
np.cos(EcLonR)))
HrAngle = LocAST - RtAscen
HrAngleR = np.radians(HrAngle)
HrAngle = HrAngle - (360 * ((abs(HrAngle) > 180)))
SunAz = np.degrees(np.arctan2(-np.sin(HrAngleR),
np.cos(LatR)*np.tan(DecR) -
np.sin(LatR)*np.cos(HrAngleR)))
SunAz[SunAz < 0] += 360
SunEl = np.degrees(np.arcsin(
np.cos(LatR) * np.cos(DecR) * np.cos(HrAngleR) +
np.sin(LatR) * np.sin(DecR)))
SolarTime = (180 + HrAngle) / 15.
# Calculate refraction correction
Elevation = SunEl
TanEl = pd.Series(np.tan(np.radians(Elevation)), index=time_utc)
Refract = pd.Series(0, index=time_utc)
Refract[(Elevation > 5) & (Elevation <= 85)] = (
58.1/TanEl - 0.07/(TanEl**3) + 8.6e-05/(TanEl**5))
Refract[(Elevation > -0.575) & (Elevation <= 5)] = (
Elevation *
(-518.2 + Elevation*(103.4 + Elevation*(-12.79 + Elevation*0.711))) +
1735)
Refract[(Elevation > -1) & (Elevation <= -0.575)] = -20.774 / TanEl
Refract *= (283/(273. + temperature)) * (pressure/101325.) / 3600.
ApparentSunEl = SunEl + Refract
# make output DataFrame
DFOut = pd.DataFrame(index=time)
DFOut['apparent_elevation'] = ApparentSunEl
DFOut['elevation'] = SunEl
DFOut['azimuth'] = SunAz
DFOut['apparent_zenith'] = 90 - ApparentSunEl
DFOut['zenith'] = 90 - SunEl
DFOut['solar_time'] = SolarTime
return DFOut
|
TypeError
|
dataset/ETHPy150Open pvlib/pvlib-python/pvlib/solarposition.py/ephemeris
|
7,514
|
def calc_time(lower_bound, upper_bound, latitude, longitude, attribute, value,
altitude=0, pressure=101325, temperature=12, xtol=1.0e-12):
"""
Calculate the time between lower_bound and upper_bound
where the attribute is equal to value. Uses PyEphem for
solar position calculations.
Parameters
----------
lower_bound : datetime.datetime
upper_bound : datetime.datetime
latitude : float
longitude : float
attribute : str
The attribute of a pyephem.Sun object that
you want to solve for. Likely options are 'alt'
and 'az' (which must be given in radians).
value : int or float
The value of the attribute to solve for
altitude : float
Distance above sea level.
pressure : int or float, optional
Air pressure in Pascals. Set to 0 for no
atmospheric correction.
temperature : int or float, optional
Air temperature in degrees C.
xtol : float, optional
The allowed error in the result from value
Returns
-------
datetime.datetime
Raises
------
ValueError
If the value is not contained between the bounds.
AttributeError
If the given attribute is not an attribute of a
PyEphem.Sun object.
"""
try:
import scipy.optimize as so
except __HOLE__:
raise ImportError('The calc_time function requires scipy')
obs, sun = _ephem_setup(latitude, longitude, altitude,
pressure, temperature)
def compute_attr(thetime, target, attr):
obs.date = thetime
sun.compute(obs)
return getattr(sun, attr) - target
lb = datetime_to_djd(lower_bound)
ub = datetime_to_djd(upper_bound)
djd_root = so.brentq(compute_attr, lb, ub,
(value, attribute), xtol=xtol)
return djd_to_datetime(djd_root)
|
ImportError
|
dataset/ETHPy150Open pvlib/pvlib-python/pvlib/solarposition.py/calc_time
|
7,515
|
def add_metadata(self, _environ, identity):
#logger = environ.get('repoze.who.logger','')
key = identity.get('repoze.who.userid')
try:
if self.key_attribute:
for sec in self.users.sections():
if self.users.has_option(sec, self.key_attribute):
if key in self.users.get(sec, self.key_attribute):
identity["user"] = dict(self.users.items(sec))
break
else:
identity["user"] = dict(self.users.items(key))
except __HOLE__:
pass
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/s2repoze/plugins/ini.py/INIMetadataProvider.add_metadata
|
7,516
|
def setup(hass, config):
"""Setup the Graphite feeder."""
graphite_config = config.get('graphite', {})
host = graphite_config.get('host', 'localhost')
prefix = graphite_config.get('prefix', 'ha')
try:
port = int(graphite_config.get('port', 2003))
except __HOLE__:
_LOGGER.error('Invalid port specified')
return False
GraphiteFeeder(hass, host, port, prefix)
return True
|
ValueError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/graphite.py/setup
|
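Row 7,516 masks the classic `int()` conversion failure: a non-numeric port string raises `ValueError`, which the component logs before aborting setup. Sketch with a hypothetical bad config value:

```python
graphite_config = {"port": "not-a-number"}  # hypothetical malformed config
try:
    port = int(graphite_config.get("port", 2003))
except ValueError:
    print("Invalid port specified")
```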
7,517
|
def _report_attributes(self, entity_id, new_state):
"""Report the attributes."""
now = time.time()
things = dict(new_state.attributes)
try:
things['state'] = state.state_as_number(new_state)
except __HOLE__:
pass
lines = ['%s.%s.%s %f %i' % (self._prefix,
entity_id, key.replace(' ', '_'),
value, now)
for key, value in things.items()
if isinstance(value, (float, int))]
if not lines:
return
_LOGGER.debug('Sending to graphite: %s', lines)
try:
self._send_to_graphite('\n'.join(lines))
except socket.gaierror:
_LOGGER.error('Unable to connect to host %s', self._host)
except socket.error:
_LOGGER.exception('Failed to send data to graphite')
|
ValueError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/graphite.py/GraphiteFeeder._report_attributes
|
7,518
|
def mkdir_p(path):
"""
Borrowed from: http://stackoverflow.com/a/600612/386925
"""
try:
os.makedirs(path)
except __HOLE__ as exc: # Python >2.5
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
|
OSError
|
dataset/ETHPy150Open winhamwr/neckbeard/neckbeard/configuration.py/mkdir_p
|
7,519
|
def main(self, args):
options, args = self.parse_args(args)
if options.quiet:
if options.quiet == 1:
level = "WARNING"
if options.quiet == 2:
level = "ERROR"
else:
level = "CRITICAL"
elif options.verbose:
level = "DEBUG"
else:
level = "INFO"
logging_dictConfig({
"version": 1,
"disable_existing_loggers": False,
"filters": {
"exclude_warnings": {
"()": "pip.utils.logging.MaxLevelFilter",
"level": logging.WARNING,
},
},
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": "%(message)s",
},
},
"handlers": {
"console": {
"level": level,
"class": "pip.utils.logging.ColorizedStreamHandler",
"stream": self.log_streams[0],
"filters": ["exclude_warnings"],
"formatter": "indent",
},
"console_errors": {
"level": "WARNING",
"class": "pip.utils.logging.ColorizedStreamHandler",
"stream": self.log_streams[1],
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": "pip.utils.logging.BetterRotatingFileHandler",
"filename": options.log or "/dev/null",
"delay": True,
"formatter": "indent",
},
},
"root": {
"level": level,
"handlers": list(filter(None, [
"console",
"console_errors",
"user_log" if options.log else None,
])),
},
# Disable any logging besides WARNING unless we have DEBUG level
# logging enabled. These use both pip._vendor and the bare names
# for the case where someone unbundles our libraries.
"loggers": dict(
(
name,
{
"level": (
"WARNING"
if level in ["INFO", "ERROR"]
else "DEBUG"
),
},
)
for name in ["pip._vendor", "distlib", "requests", "urllib3"]
),
})
if sys.version_info[:2] == (2, 6):
warnings.warn(
"Python 2.6 is no longer supported by the Python core team, "
"please upgrade your Python. A future version of pip will "
"drop support for Python 2.6",
deprecation.Python26DeprecationWarning
)
# TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.critical(
'Could not find an activated virtualenv (required).'
)
sys.exit(VIRTUALENV_NOT_FOUND)
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
return status
except PreviousBuildDirError as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError, BadCommand) as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return ERROR
except CommandError as exc:
logger.critical('ERROR: %s', exc)
logger.debug('Exception information:', exc_info=True)
return ERROR
except __HOLE__:
logger.critical('Operation cancelled by user')
logger.debug('Exception information:', exc_info=True)
return ERROR
except:
logger.critical('Exception:', exc_info=True)
return UNKNOWN_ERROR
finally:
# Check if we're using the latest version of pip available
if (not options.disable_pip_version_check and not
getattr(options, "no_index", False)):
with self._build_session(
options,
retries=0,
timeout=min(5, options.timeout)) as session:
pip_version_check(session)
return SUCCESS
|
KeyboardInterrupt
|
dataset/ETHPy150Open pypa/pip/pip/basecommand.py/Command.main
|
7,520
|
def setup_client(self, host, port):
"""Sets up the socket client.
Args:
host: String of the host name.
port: Int of the port name.
"""
try:
self.client_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self.client_socket.connect((host, port))
except __HOLE__:
self.log.error('Could not connect to remote socket server. %s:%s',
host, port)
|
IOError
|
dataset/ETHPy150Open missionpinball/mpf/mpf/plugins/socket_events.py/SocketClient.setup_client
|
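The masked exception in row 7,520 is `IOError` because socket connection failures surface as `socket.error`, which is a subclass of `IOError` on Python 2.6+ and an alias of `OSError`/`IOError` on Python 3. A sketch against a port that is presumably closed:

```python
import socket

client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    client_socket.connect(("127.0.0.1", 1))  # hypothetical unused port
except IOError as exc:
    print("Could not connect to remote socket server:", exc)
```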
7,521
|
def send_message(self, message):
"""Sends a message to the remote socket host.
Args:
message: String of the message to send.
"""
self.log.info("SOCKET SENDING: %s", message)
prepped_message = message + '\n'
try:
self.client_socket.send(prepped_message)
except __HOLE__:
# maybe we got disconnected? Attempt to connect and send.
self.setup_client(self.server_name, self.server_port)
try:
self.client_socket.send(prepped_message)
except:
self.log.error('Unable to send %s to remote socket server',
prepped_message)
|
IOError
|
dataset/ETHPy150Open missionpinball/mpf/mpf/plugins/socket_events.py/SocketClient.send_message
|
7,522
|
def __call__(self, element, compiler, **kw):
# TODO: yes, this could also switch off of DBAPI in use.
fn = self.specs.get(compiler.dialect.name, None)
if not fn:
try:
fn = self.specs['default']
except __HOLE__:
raise exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element))
return fn(element, compiler, **kw)
|
KeyError
|
dataset/ETHPy150Open zzzeek/sqlalchemy/lib/sqlalchemy/ext/compiler.py/_dispatcher.__call__
|
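Row 7,522 falls back to a `'default'` entry in the dialect registry, and a missing dictionary key raises `KeyError`, which the dispatcher turns into a `CompileError`. Minimal sketch with a hypothetical registry:

```python
specs = {"postgresql": lambda element, compiler: "..."}  # hypothetical handler registry
try:
    fn = specs["default"]
except KeyError:
    print("construct has no default compilation handler")
```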
7,523
|
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False,
closed=None, ambiguous='raise', dtype=None, **kwargs):
dayfirst = kwargs.pop('dayfirst', None)
yearfirst = kwargs.pop('yearfirst', None)
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
# if dtype has an embedded tz, capture it
if dtype is not None:
try:
dtype = DatetimeTZDtype.construct_from_string(dtype)
dtz = getattr(dtype, 'tz', None)
if dtz is not None:
if tz is not None and str(tz) != str(dtz):
raise ValueError("cannot supply both a tz and a dtype"
" with a tz")
tz = dtz
except __HOLE__:
pass
if data is None:
return cls._generate(start, end, periods, name, freq,
tz=tz, normalize=normalize, closed=closed,
ambiguous=ambiguous)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if lib.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = tslib.parse_str_array_to_datetime(data, freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst)
else:
data = tools.to_datetime(data, errors='raise')
data.offset = freq
if isinstance(data, DatetimeIndex):
if name is not None:
data.name = name
if tz is not None:
# we might already be localized to this tz
# so passing the same tz is ok
# however any other tz is a no-no
if data.tz is None:
return data.tz_localize(tz, ambiguous=ambiguous)
elif str(tz) != str(data.tz):
raise TypeError("Already tz-aware, use tz_convert "
"to convert.")
return data
if issubclass(data.dtype.type, compat.string_types):
data = tslib.parse_str_array_to_datetime(data, freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst)
if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
if isinstance(data, ABCSeries):
data = data._values
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
else:
# the tz's must match
if str(tz) != str(data.tz):
raise TypeError("Already tz-aware, use tz_convert "
"to convert.")
subarr = data.values
if freq is None:
freq = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = tslib.cast_to_nanoseconds(data)
else:
subarr = data
elif data.dtype == _INT64_DTYPE:
if isinstance(data, Int64Index):
raise TypeError('cannot convert Int64Index->DatetimeIndex')
if copy:
subarr = np.asarray(data, dtype=_NS_DTYPE)
else:
subarr = data.view(_NS_DTYPE)
else:
if isinstance(data, (ABCSeries, Index)):
values = data._values
else:
values = data
if lib.is_string_array(values):
subarr = tslib.parse_str_array_to_datetime(
values, freq=freq, dayfirst=dayfirst, yearfirst=yearfirst)
else:
try:
subarr = tools.to_datetime(data, box=False)
# make sure that we have a index/ndarray like (and not a
# Series)
if isinstance(subarr, ABCSeries):
subarr = subarr._values
if subarr.dtype == np.object_:
subarr = tools._to_datetime(subarr, box=False)
except ValueError:
# tz aware
subarr = tools._to_datetime(data, box=False, utc=True)
# we may not have been able to convert
if not (is_datetimetz(subarr) or
np.issubdtype(subarr.dtype, np.datetime64)):
raise ValueError('Unable to convert %s to datetime dtype'
% str(data))
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = tslib.maybe_get_tz(tz)
if (not isinstance(data, DatetimeIndex) or
getattr(data, 'tz', None) is None):
# Convert tz-naive to UTC
ints = subarr.view('i8')
subarr = tslib.tz_localize_to_utc(ints, tz,
ambiguous=ambiguous)
subarr = subarr.view(_NS_DTYPE)
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
# if dtype is provided, coerce here
if dtype is not None:
if not is_dtype_equal(subarr.dtype, dtype):
if subarr.tz is not None:
raise ValueError("cannot localize from non-UTC data")
dtype = DatetimeTZDtype.construct_from_string(dtype)
subarr = subarr.tz_localize(dtype.tz)
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(subarr[0], None, len(
subarr), None, freq, tz=tz, ambiguous=ambiguous)
if not np.array_equal(subarr.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed '
'dates does not conform to passed '
'frequency {1}'
.format(inferred, freq.freqstr))
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
|
TypeError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.__new__
|
7,524
|
def _add_offset(self, offset):
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset.apply_index(values)
if self.tz is not None:
result = result.tz_localize(self.tz)
return result
except __HOLE__:
warnings.warn("Non-vectorized DateOffset being applied to Series "
"or DatetimeIndex", PerformanceWarning)
return self.astype('O') + offset
|
NotImplementedError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex._add_offset
|
7,525
|
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combine
overlapping ranges with the same DateOffset, will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except __HOLE__:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result.tz = this.tz
if (result.freq is None and
(this.freq is not None or other.freq is not None)):
result.offset = to_offset(result.inferred_freq)
return result
|
TypeError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.union
|
7,526
|
def union_many(self, others):
"""
A bit of a hack to accelerate unioning a collection of indexes
"""
this = self
for other in others:
if not isinstance(this, DatetimeIndex):
this = Index.union(this, other)
continue
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except __HOLE__:
pass
this, other = this._maybe_utc_convert(other)
if this._can_fast_union(other):
this = this._fast_union(other)
else:
tz = this.tz
this = Index.union(this, other)
if isinstance(this, DatetimeIndex):
this.tz = tz
if this.freq is None:
this.offset = to_offset(this.inferred_freq)
return this
|
TypeError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.union_many
|
7,527
|
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer',
'mixed-integer-float', 'mixed')):
try:
other = DatetimeIndex(other)
except (TypeError, __HOLE__):
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
|
ValueError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.join
|
7,528
|
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except (__HOLE__, ValueError):
pass
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif (other.offset is None or self.offset is None or
other.offset != self.offset or
not other.offset.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
|
TypeError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.intersection
|
7,529
|
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if isinstance(key, datetime):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
return self.get_value_maybe_box(series, key)
if isinstance(key, time):
locs = self.indexer_at_time(key)
return series.take(locs)
try:
return _maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (__HOLE__, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
|
TypeError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.get_value
|
7,530
|
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if isinstance(key, datetime):
# needed to localize naive datetimes
key = Timestamp(key, tz=self.tz)
return Index.get_loc(self, key, method, tolerance)
if isinstance(key, time):
if method is not None:
raise NotImplementedError('cannot yet lookup inexact labels '
'when key is a time object')
return self.indexer_at_time(key)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, __HOLE__, ValueError):
pass
try:
stamp = Timestamp(key, tz=self.tz)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
|
KeyError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.get_loc
|
7,531
|
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
# an array of (self.hour, self.minute, self.seconds, self.microsecond).
if isinstance(start, time) and isinstance(end, time):
if step is not None and step != 1:
raise ValueError('Must have step size of 1 with time slices')
return self.indexer_between_time(start, end)
if isinstance(start, time) or isinstance(end, time):
raise KeyError('Cannot mix time and non-time slice keys')
try:
return Index.slice_indexer(self, start, end, step, kind=kind)
except __HOLE__:
# For historical reasons DatetimeIndex by default supports
# value-based partial (aka string) slices on non-monotonic arrays,
# let's try that.
if ((start is None or isinstance(start, compat.string_types)) and
(end is None or isinstance(end, compat.string_types))):
mask = True
if start is not None:
start_casted = self._maybe_cast_slice_bound(
start, 'left', kind)
mask = start_casted <= self
if end is not None:
end_casted = self._maybe_cast_slice_bound(
end, 'right', kind)
mask = (self <= end_casted) & mask
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
else:
raise
# alias to offset
|
KeyError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.slice_indexer
|
7,532
|
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
freq = None
if isinstance(item, (datetime, np.datetime64)):
self._assert_can_do_op(item)
if not self._has_same_tz(item):
raise ValueError(
'Passed item and index have different timezone')
# check freq can be preserved on edge cases
if self.size and self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item, tz=self.tz)
try:
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
if self.tz is not None:
new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq,
tz=self.tz)
except (__HOLE__, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError(
"cannot insert DatetimeIndex with incompatible label")
|
AttributeError
|
dataset/ETHPy150Open pydata/pandas/pandas/tseries/index.py/DatetimeIndex.insert
|
7,533
|
def run_tests(self):
auto_reload = False
if '-a' in sys.argv or '--autoreload' in sys.argv:
auto_reload = True
if auto_reload:
from django.utils.autoreload import restart_with_reloader, reloader_thread
if os.environ.get("RUN_MAIN") == "true":
try:
TestCommand.run_tests(self)
except LabelException:
sys.exit(1)
except:
pass
try:
reloader_thread()
except __HOLE__:
pass
else:
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
else:
return TestCommand.run_tests(self)
|
KeyboardInterrupt
|
dataset/ETHPy150Open praekelt/django-setuptest/setuptest/__init__.py/test.run_tests
|
7,534
|
def run(host, queue, exchange, key):
"Long running message queue processor"
# we define a tag based on the queue name
tag = "%s_tag" % queue
# set up a connection to the server
conn = amqp.Connection(host)
# and get a channel
chan = conn.channel()
# define your queue and exchange
# if they already exist then we check they are of the correct type
chan.exchange_declare(exchange=exchange, type="direct",
durable=False, auto_delete=True)
# for our purposes queues want to be unique between implementations
# and exchanges are defined by the paperboy service
chan.queue_declare(queue=queue, durable=False,
exclusive=False, auto_delete=True)
# and wire the queue and exchange together
chan.queue_bind(queue=queue, exchange=exchange, routing_key=key)
# register our callback function for when we see something on the queue
chan.basic_consume(queue=queue, no_ack=True,
callback=recv_callback, consumer_tag=tag)
# set the script to be long running
try:
while True:
chan.wait()
except __HOLE__:
# if we do exit then tell the server
chan.basic_cancel(tag)
# and close everything
chan.close()
conn.close()
sys.exit()
|
KeyboardInterrupt
|
dataset/ETHPy150Open garethr/Asteroid/asteroid/bin/asteroid_listen.py/run
|
7,535
|
def get_c_extract(r, name, sub):
"""
Wrapper around c_extract that initializes py_name from storage.
"""
# `c_extract` is called when getting the value of an apply node's
# input from the compute map, before being used by its clients.
# If one of the clients has `check_input=True`, we need to perform
# checks on the variable.
# However that code is not used by C code of the apply node creating
# this variable, so there is no need to check `r.owner.op.check_input`.
if any([getattr(c.op, 'check_input', config.check_input)
for (c, _) in r.clients
if not isinstance(c, string_types)]):
# check_broadcast is just a hack to easily remove just the
# broadcast check on the old GPU back-end. This check isn't
# done in the new GPU back-end or on the CPU.
if any([getattr(c.op, 'check_broadcast', True)
for (c, _) in r.clients
if not isinstance(c, string_types)]):
c_extract = r.type.c_extract(name, sub, True)
else:
try:
c_extract = r.type.c_extract(
name, sub, True,
check_broadcast=False)
except __HOLE__ as e:
c_extract = r.type.c_extract(name, sub, True)
else:
c_extract = r.type.c_extract(name, sub, False)
pre = """
py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
{Py_XINCREF(py_%(name)s);}
""" % locals()
return pre + c_extract
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/get_c_extract
|
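In row 7,535 the hole is a `TypeError` raised when a type's `c_extract` does not accept the newer `check_broadcast` keyword; calling any Python function with an unexpected keyword argument fails the same way. A sketch using a hypothetical old-style signature:

```python
def c_extract(name, sub, check_input):  # hypothetical signature without check_broadcast
    return "extract %s (check_input=%s)" % (name, check_input)

try:
    code = c_extract("x", {}, True, check_broadcast=False)
except TypeError:
    code = c_extract("x", {}, True)  # fall back to the older call form
print(code)
```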
7,536
|
def get_c_extract_out(r, name, sub):
"""
Wrapper around c_extract_out that initializes py_name from storage.
"""
# `c_extract_out` is used to extract an output variable from
# the compute map, to be used as pre-allocated memory for `r`
# before its value gets computed.
# If the node producing `r` has `check_inputs=True`, it may
# also perform type checks on the initial value of the output,
# so we need to pass `check_input=True` to `c_extract_out`.
# However, that code is not used by potential clients of `r`,
# so we do not need to check them.
check_input = getattr(r.owner.op, 'check_input', config.check_input)
# check_broadcast is just a hack to easily remove just the
# broadcast check on the old GPU back-end. This check isn't
# done in the new GPU back-end or on the CPU.
if getattr(r.owner.op, 'check_broadcast', True):
c_extract = r.type.c_extract_out(name, sub, check_input)
else:
try:
c_extract = r.type.c_extract_out(name, sub, check_input,
check_broadcast=False)
except __HOLE__ as e:
c_extract = r.type.c_extract_out(name, sub, check_input)
pre = """
py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
{Py_XINCREF(py_%(name)s);}
""" % locals()
return pre + c_extract
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/get_c_extract_out
|
7,537
|
def fetch_variables(self):
"""
WRITEME
Fills the inputs, outputs, variables, orphans, temps and node_order
fields.
"""
fgraph = self.fgraph
self.inputs = fgraph.inputs
self.outputs = fgraph.outputs
self.node_order = self.schedule(fgraph)
# list(fgraph.variables)
# We need to include the unused inputs in our variables,
# otherwise we can't pass them to the module.
self.variables = [var for var in self.inputs if not len(var.clients)]
self.variables += graph.variables(self.inputs, self.outputs)
# This adds a hidden input which is the params for each node
# that needs it
self.node_params = dict()
for node in self.node_order:
params = node.run_params()
if params is not graph.NoParams:
# try to avoid creating more than one variable for the
# same params.
if params in self.node_params:
var = self.node_params[params]
assert var.type == node.params_type
var.clients.append((node, 'params'))
else:
var = graph.Constant(node.params_type, params)
var.clients = [(node, 'params')]
self.node_params[params] = var
self.variables.append(var)
# The orphans field is listified to ensure a consistent order.
# list(fgraph.orphans.difference(self.outputs))
self.orphans = list(r for r in self.variables
if isinstance(r, graph.Constant) and
r not in self.inputs)
# C type constants (theano.scalar.Scalar). They don't request an object
self.consts = []
# Move c type from orphans (theano.scalar.Scalar) to self.consts
for variable in self.orphans:
if isinstance(variable, graph.Constant):
try:
variable.type.c_literal(variable.data)
self.consts.append(variable)
self.orphans.remove(variable)
except (utils.MethodNotDefined, __HOLE__):
pass
self.temps = list(set(self.variables).difference(
self.inputs).difference(self.outputs).difference(self.orphans))
|
NotImplementedError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/CLinker.fetch_variables
|
7,538
|
def compile_args(self):
"""
WRITEME
Returns a list of compile args that are needed by one
or more Variables or Ops.
This might contain duplicates.
"""
ret = ["-O3"]
    # These are the parameters that -ffast-math activates. They are listed
    # explicitly because FillMissing must disable some of them; passing
    # -ffast-math itself would toggle them all at the same time.
ret += ["-fno-math-errno",
# "-funsafe-math-optimizations",
# "-fno-signaling-nans",
# "-fcx-limited-range",
# "-fno-rounding-math",
# "-ffinite-math-only",
            # the current code generates labels even if they are not used.
            # Could use a gcc attribute for those labels only
"-Wno-unused-label",
"-Wno-unused-variable", # idem as the precedent
"-Wno-write-strings", # generated by our code generator...
]
c_compiler = self.c_compiler()
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
try:
ret += x.c_compile_args(c_compiler)
except TypeError:
ret += x.c_compile_args()
except utils.MethodNotDefined:
pass
    ret = utils.uniq(ret)  # to remove duplicates
# The args set by the compiler include the user flags. We do not want
# to reorder them
ret += c_compiler.compile_args()
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
try:
no_comp = x.c_no_compile_args(c_compiler)
except TypeError:
no_comp = x.c_no_compile_args()
for i in no_comp:
try:
ret.remove(i)
except __HOLE__:
pass # in case the value is not there
except utils.MethodNotDefined:
pass
return ret
|
ValueError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/CLinker.compile_args
|
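The hole in `compile_args` guards `ret.remove(i)`, the usual idiom for deleting a list element that may not be present. A small stand-alone illustration with made-up flag values:
flags = ["-O3", "-Wno-unused-label"]
for unwanted in ["-ffast-math", "-Wno-unused-label"]:
    try:
        flags.remove(unwanted)
    except ValueError:
        # list.remove raises ValueError when the value is absent.
        pass
assert flags == ["-O3"]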
7,539
|
def headers(self):
"""
WRITEME
Returns a list of headers that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
c_compiler = self.c_compiler()
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
try:
ret += x.c_headers(c_compiler)
except __HOLE__:
ret += x.c_headers()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/CLinker.headers
|
7,540
|
def header_dirs(self):
"""
WRITEME
Returns a list of lib directories that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
c_compiler = self.c_compiler()
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
try:
ret += x.c_header_dirs(c_compiler)
except __HOLE__:
ret += x.c_header_dirs()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/CLinker.header_dirs
|
7,541
|
def libraries(self):
"""
WRITEME
Returns a list of libraries that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
c_compiler = self.c_compiler()
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
try:
ret += x.c_libraries(c_compiler)
except __HOLE__:
ret += x.c_libraries()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/CLinker.libraries
|
7,542
|
def lib_dirs(self):
"""
WRITEME
Returns a list of lib directories that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
c_compiler = self.c_compiler()
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
try:
ret += x.c_lib_dirs(c_compiler)
except __HOLE__:
ret += x.c_lib_dirs()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/CLinker.lib_dirs
|
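The four collection methods above (`headers`, `header_dirs`, `libraries`, `lib_dirs`) all repeat one dispatch: call the provider with the compiler and fall back on `TypeError` to the older zero-argument signature. A hedged sketch of that dual-signature probe, using invented provider functions:
def gather(providers, c_compiler):
    """Collect values from callables that may or may not take a compiler."""
    ret = []
    for provider in providers:
        try:
            ret += provider(c_compiler)   # newer, compiler-aware signature
        except TypeError:
            ret += provider()             # older signature without the argument
    return ret

def new_style(compiler):
    return ["<omp.h>"]

def old_style():
    return ["<math.h>"]

assert gather([new_style, old_style], "gcc") == ["<omp.h>", "<math.h>"]
Note the trade-off this pattern accepts: a TypeError raised inside a compiler-aware provider is indistinguishable from a signature mismatch and silently triggers the retry.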
7,543
|
def cmodule_key_(self, fgraph, no_recycling, compile_args=None,
libraries=None, header_dirs=None, insert_config_md5=True,
c_compiler=None):
"""
Do the actual computation of cmodule_key in a static method
to allow it to be reused in scalar.Composite.__eq__.
"""
if compile_args is None:
compile_args = []
if libraries is None:
libraries = []
if header_dirs is None:
header_dirs = []
order = self.schedule(fgraph)
# set of variables that have been computed by nodes we have
# seen 'so far' in the loop below
fgraph_computed_set = set()
fgraph_inputs_dict = dict((i, (-1, pos)) for pos, i in
enumerate(fgraph.inputs))
constant_ids = dict()
op_pos = {} # Apply -> topological position
# First we put the header, compile_args, library names and config md5
# into the signature.
sig = ['CLinker.cmodule_key'] # will be cast to tuple on return
if compile_args is not None:
# We must sort it as the order from a set is not guaranteed.
            # In particular, 2 sets with the same content can give a different
            # order depending on the order the data was inserted.
# Sets are used to remove duplicate elements.
args = sorted(compile_args)
args = tuple(args)
sig.append(args)
if libraries is not None:
# see comments for compile_args
args = sorted(libraries)
args = tuple(args)
sig.append(args)
if header_dirs is not None:
args = sorted(header_dirs)
args = tuple(args)
sig.append(args)
# We must always add the numpy ABI version here as
        # DynamicModule always adds the include <numpy/arrayobject.h>
sig.append('NPY_ABI_VERSION=0x%X' %
numpy.core.multiarray._get_ndarray_c_version())
if c_compiler:
sig.append('c_compiler_str=' + c_compiler.version_str())
# IMPORTANT: The 'md5' prefix is used to isolate the compilation
# parameters from the rest of the key. If you want to add more key
# elements, they should be before this md5 hash if and only if they
# can lead to a different compiled file with the same source code.
if insert_config_md5:
sig.append('md5:' + theano.configparser.get_config_md5())
else:
sig.append('md5: <omitted>')
error_on_play = [False]
def in_sig(i, topological_pos, i_idx):
            # assert that every input to every node is one of:
# - an fgraph input
# - an output from a node in the FunctionGraph
# - a Constant
# It is important that a variable (i)
# yield a 'position' that reflects its role in code_gen()
if isinstance(i, graph.Constant): # orphans
if id(i) not in constant_ids:
isig = (i.signature(), topological_pos, i_idx)
# If the Theano constant provides a strong hash
# (no collision for transpose, 2, 1, 0, -1, -2,
# 2 element swapped...) we put this hash in the signature
# instead of the value. This makes the key file much
# smaller for big constant arrays. Before this, we saw key
# files up to 80M.
if hasattr(isig[0], "theano_hash"):
isig = (isig[0].theano_hash(), topological_pos, i_idx)
try:
hash(isig)
except Exception:
# generic constants don't have a hashable signature
error_on_play[0] = True
return None
constant_ids[id(i)] = isig
else:
isig = constant_ids[id(i)]
# print 'SIGNATURE', i.signature()
# return i.signature()
elif i in fgraph_inputs_dict: # inputs
isig = fgraph_inputs_dict[i]
else:
if i.owner is None:
assert all(all(out is not None for out in o.outputs)
for o in order)
assert all(input.owner is None for input in fgraph.inputs)
raise Exception('what is this?', (i, type(i), i.clients,
fgraph))
if i in fgraph.outputs:
isig = (op_pos[i.owner], # outputs
i.owner.outputs.index(i),
fgraph.outputs.index(i))
else:
isig = (op_pos[i.owner], i.owner.outputs.index(i)) # temps
return (isig, i in no_recycling)
version = []
for node_pos, node in enumerate(order):
try:
# Pure Ops do not have a c_code_cache_version_apply ...
version.append(node.op.c_code_cache_version_apply(node))
except __HOLE__:
pass
for i in node.inputs:
version.append(i.type.c_code_cache_version())
for o in node.outputs:
version.append(o.type.c_code_cache_version())
# add the signature for this node
sig.append((
node.op,
tuple((i.type, in_sig(i, node_pos, ipos))
for ipos, i in enumerate(node.inputs)),
(1, # Increment if cmodule change its handling of outputs
tuple(o in no_recycling for o in node.outputs))))
if error_on_play[0]:
# if one of the signatures is not hashable
# then bypass the cache mechanism and
# compile fresh every time
return None
op_pos[node] = node_pos
fgraph_computed_set.update(node.outputs)
        # Add unused inputs to the key.
        # If inputs don't define a 'clients' attribute (as is the case if
        # fgraph is not a real FunctionGraph but a FakeFunctionGraph, a
        # lightweight class designed to imitate FunctionGraph), pretend they
        # have none. This is fine because the goal is only to have all of the
        # graph's information used to compute the key. If we mistakenly
        # pretend that inputs with clients don't have any, we are only using
        # those inputs more than once to compute the key.
for ipos, var in [(i, var) for i, var in enumerate(fgraph.inputs)
if not len(getattr(var, 'clients', []))]:
sig.append((var.type, in_sig(var, -1, ipos)))
        # crystallize the signature and version
sig = tuple(sig)
version = tuple(version)
for v in version:
if not v:
# one of the ops or types here is unversioned,
# so this fgraph is entirely unversioned
return ((), sig)
return version, sig
|
AttributeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/CLinker.cmodule_key_
|
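Inside `cmodule_key_`, the version list is built by calling `c_code_cache_version_apply` only on ops that define it; the `AttributeError` from ops without the method is swallowed. A minimal illustration of that optional-method probe (the op classes here are invented for the example):
class VersionedOp(object):
    def c_code_cache_version_apply(self, node):
        return (1, 0)

class PureOp(object):
    pass  # defines no cache-version method at all

version = []
for op in (VersionedOp(), PureOp()):
    try:
        version.append(op.c_code_cache_version_apply(None))
    except AttributeError:
        pass  # ops without the method contribute nothing to the version
assert version == [(1, 0)]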
7,544
|
def cthunk_factory(self, error_storage, in_storage, out_storage,
storage_map=None, keep_lock=False):
"""WRITEME
error_storage -> list of length 3
in_storage -> list of lists of length 1, one per input
out_storage -> list of lists of length 1, one per output
Returns a thunk that points to an instance of a C struct that
can carry on the computation of this linker's fgraph. That thunk,
when executed, will fetch its inputs from in_storage, put its
outputs in out_storage and if an error occurs will put the
type, value and traceback of the exception in error_storage.
"""
try:
key = self.cmodule_key()
except __HOLE__:
key = None
if key is None:
# If we can't get a key, then forget the cache mechanism.
module = self.compile_cmodule()
else:
module = get_module_cache().module_from_key(
key=key, lnk=self, keep_lock=keep_lock)
vars = self.inputs + self.outputs + self.orphans
# List of indices that should be ignored when passing the arguments
# (basically, everything that the previous call to uniq eliminated)
dupidx = [i for i, x in enumerate(vars)
if vars.count(x) > 1 and vars.index(x) != i]
out_storage = [x for i, x in enumerate(out_storage)
if (i + len(in_storage)) not in dupidx]
in_storage = [x for i, x in enumerate(in_storage) if i not in dupidx]
if storage_map is None:
orphd = [[orphan.data] for orphan in self.orphans]
else:
orphd = [storage_map[orphan] for orphan in self.orphans]
ret = module.instantiate(error_storage,
*(in_storage + out_storage + orphd))
return ret
|
KeyError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/CLinker.cthunk_factory
|
7,545
|
def __call__(self):
failure = run_cthunk(self.cthunk)
if failure:
task, taskname, id = self.find_task(failure)
try:
trace = task.trace
except __HOLE__:
trace = ()
try:
exc_type, _exc_value, exc_trace = self.error_storage
if task in self.nodes:
self.position_of_error = self.nodes.index(task)
                # this can be used to retrieve the location where the Op was declared
exc_value = exc_type(_exc_value)
exc_value.__thunk_trace__ = trace
except Exception:
print(('ERROR retrieving error_storage.'
'Was the error set in the c code?'),
end=' ', file=sys.stderr)
print(self.error_storage, file=sys.stderr)
raise
reraise(exc_type, exc_value, exc_trace)
|
AttributeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/cc.py/_CThunk.__call__
|
7,546
|
def _get_filename(loader, mod_name):
try:
get_filename = loader.get_filename
except __HOLE__:
return None
else:
return get_filename(mod_name)
# Helper to get the loader, code and filename for a module
|
AttributeError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/runpy.py/_get_filename
|
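`_get_filename` probes the loader for an optional `get_filename` method and returns `None` when the attribute is missing. The same probe works for any object, as this sketch with hypothetical loader classes shows:
def get_filename_or_none(loader, mod_name):
    """Return loader.get_filename(mod_name), or None if unsupported."""
    try:
        get_filename = loader.get_filename
    except AttributeError:
        return None  # loader does not expose get_filename
    else:
        return get_filename(mod_name)

class ArchiveLoader(object):
    def get_filename(self, mod_name):
        return "/archive.zip/%s.py" % mod_name

class LegacyLoader(object):
    pass

assert get_filename_or_none(ArchiveLoader(), "pkg") == "/archive.zip/pkg.py"
assert get_filename_or_none(LegacyLoader(), "pkg") is None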
7,547
|
def _run_module_as_main(mod_name, set_argv0=True):
"""Runs the designated module in the __main__ namespace
These __*__ magic variables will be overwritten:
__file__
__loader__
"""
try:
loader, code, fname = _get_module_details(mod_name)
except __HOLE__ as exc:
# Try to provide a good error message
# for directories, zip files and the -m switch
if set_argv0:
            # For the -m switch, just display the exception
info = str(exc)
else:
# For directories/zipfiles, let the user
# know what the code was looking for
info = "can't find '__main__.py' in %r" % sys.argv[0]
msg = "%s: %s" % (sys.executable, info)
sys.exit(msg)
pkg_name = mod_name.rpartition('.')[0]
main_globals = sys.modules["__main__"].__dict__
if set_argv0:
sys.argv[0] = fname
return _run_code(code, main_globals, None,
"__main__", fname, loader, pkg_name)
|
ImportError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/runpy.py/_run_module_as_main
|
7,548
|
def load(self, name):
if name in self.impls:
return self.impls[name]()
if self.auto_fn:
loader = self.auto_fn(name)
if loader:
self.impls[name] = loader
return loader()
try:
import pkg_resources
except __HOLE__:
pass
else:
for impl in pkg_resources.iter_entry_points(
self.group, name):
self.impls[name] = impl.load
return impl.load()
raise exc.NoSuchModuleError(
"Can't load plugin: %s:%s" %
(self.group, name))
|
ImportError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/util/langhelpers.py/PluginLoader.load
|
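The hole in `PluginLoader.load` is the classic optional-dependency import: `pkg_resources` may be absent, in which case the entry-point lookup is skipped. A generic sketch of that guard (the helper function is illustrative, not SQLAlchemy's API):
try:
    import pkg_resources
except ImportError:
    pkg_resources = None  # setuptools is optional; entry points are skipped

def iter_entry_point_loaders(group, name):
    """Return entry-point load callables, or nothing without pkg_resources."""
    if pkg_resources is None:
        return []
    return [ep.load for ep in pkg_resources.iter_entry_points(group, name)]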
7,549
|
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
if method is object.__init__:
args = grouped and '(self)' or 'self'
else:
try:
return format_argspec_plus(method, grouped=grouped)
except __HOLE__:
args = (grouped and '(self, *args, **kwargs)'
or 'self, *args, **kwargs')
return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
|
TypeError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/util/langhelpers.py/format_argspec_init
|
7,550
|
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return inspect.getargspec(method)
except __HOLE__:
if method is object.__init__:
return (['self'], None, None, None)
else:
return (['self'], 'args', 'kwargs', None)
|
TypeError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/util/langhelpers.py/getargspec_init
|
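`format_argspec_init` and `getargspec_init` both lean on `inspect.getargspec` raising `TypeError` for unreflectable, usually C-implemented, callables. A short sketch of the same fallback, assuming a Python version that still ships `inspect.getargspec` (it was removed in 3.11):
import inspect

def argspec_or_generic(method):
    """Return inspect.getargspec(method), or a generic spec for C callables."""
    try:
        return inspect.getargspec(method)
    except TypeError:
        # Unreflectable (usually C-implemented) callables get the catch-all.
        return (['self'], 'args', 'kwargs', None)

class Plain(object):
    def __init__(self, a, b=1):
        self.a, self.b = a, b

assert argspec_or_generic(Plain.__init__)[0] == ['self', 'a', 'b']
# On Python 2, object.__init__ is unreflectable and takes the fallback path.
generic = argspec_or_generic(object.__init__)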
7,551
|
def generic_repr(obj, additional_kw=(), to_inspect=None):
"""Produce a __repr__() based on direct association of the __init__()
specification vs. same-named attributes present.
"""
if to_inspect is None:
to_inspect = [obj]
else:
to_inspect = _collections.to_list(to_inspect)
missing = object()
pos_args = []
kw_args = _collections.OrderedDict()
vargs = None
for i, insp in enumerate(to_inspect):
try:
(_args, _vargs, vkw, defaults) = \
inspect.getargspec(insp.__init__)
except __HOLE__:
continue
else:
default_len = defaults and len(defaults) or 0
if i == 0:
if _vargs:
vargs = _vargs
if default_len:
pos_args.extend(_args[1:-default_len])
else:
pos_args.extend(_args[1:])
else:
kw_args.update([
(arg, missing) for arg in _args[1:-default_len]
])
if default_len:
kw_args.update([
(arg, default)
for arg, default
in zip(_args[-default_len:], defaults)
])
output = []
output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
if vargs is not None and hasattr(obj, vargs):
output.extend([repr(val) for val in getattr(obj, vargs)])
for arg, defval in kw_args.items():
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except:
pass
if additional_kw:
for arg, defval in additional_kw:
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except:
pass
return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
|
TypeError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/util/langhelpers.py/generic_repr
|
7,552
|
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
fn = getattr(from_cls, method)
if not hasattr(fn, '__call__'):
continue
fn = getattr(fn, 'im_func', fn)
except AttributeError:
continue
try:
spec = inspect.getargspec(fn)
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except __HOLE__:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
compat.exec_(py, env)
try:
env[method].__defaults__ = fn.__defaults__
except AttributeError:
pass
setattr(into_cls, method, env[method])
|
TypeError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/util/langhelpers.py/monkeypatch_proxied_specials
|
7,553
|
def __getattr__(self, key):
if key == 'module':
raise ImportError("Could not resolve module %s"
% self._full_path)
try:
attr = getattr(self.module, key)
except __HOLE__:
raise AttributeError(
"Module %s has no attribute '%s'" %
(self._full_path, key)
)
self.__dict__[key] = attr
return attr
# from paste.deploy.converters
|
AttributeError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/util/langhelpers.py/dependencies._importlater.__getattr__
|
7,554
|
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except (moves.configparser.Error, __HOLE__) as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
|
KeyError
|
dataset/ETHPy150Open openstack-infra/python-storyboardclient/storyboardclient/openstack/common/log.py/_load_log_config
|
7,555
|
def is_relevant_alias(self, alias):
(namespace, nid) = alias
try:
nid = nid.lower()
except __HOLE__:
return False
if (namespace == "url"):
if ("twitter.com" in nid) and ("/status/" in nid):
return True
return False
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/twitter_tweet.py/Twitter_Tweet.is_relevant_alias
|
7,556
|
def getProcessor(input, output, config):
plugins = plugin.getPlugins(IProcessor)
for plug in plugins:
if plug.name == input:
module = reflect.namedModule(plug.moduleName)
break
else:
# try treating it as a module name
try:
module = reflect.namedModule(input)
except __HOLE__:
print '%s: no such input: %s' % (sys.argv[0], input)
return
try:
return process.getProcessor(module, output, config)
except process.NoProcessorError, e:
print "%s: %s" % (sys.argv[0], e)
|
ImportError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/lore/scripts/lore.py/getProcessor
|
7,557
|
def RunCommand(self):
"""Command entry point for the test command."""
if not unittest:
raise CommandException('On Python 2.6, the unittest2 module is required '
'to run the gsutil tests.')
failfast = False
list_tests = False
max_parallel_tests = _DEFAULT_TEST_PARALLEL_PROCESSES
perform_coverage = False
sequential_only = False
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-c':
perform_coverage = True
elif o == '-f':
failfast = True
elif o == '-l':
list_tests = True
elif o == ('--' + _SEQUENTIAL_ISOLATION_FLAG):
# Called to isolate a single test in a separate process.
# Don't try to isolate it again (would lead to an infinite loop).
sequential_only = True
elif o == '-p':
max_parallel_tests = long(a)
elif o == '-s':
if not tests.util.HAS_S3_CREDS:
raise CommandException('S3 tests require S3 credentials. Please '
'add appropriate credentials to your .boto '
'file and re-run.')
tests.util.RUN_S3_TESTS = True
elif o == '-u':
tests.util.RUN_INTEGRATION_TESTS = False
if perform_coverage and not coverage:
raise CommandException(
'Coverage has been requested but the coverage module was not found. '
'You can install it with "pip install coverage".')
if (tests.util.RUN_S3_TESTS and
max_parallel_tests > _DEFAULT_S3_TEST_PARALLEL_PROCESSES):
self.logger.warn(
'Reducing parallel tests to %d due to S3 maximum bucket '
'limitations.', _DEFAULT_S3_TEST_PARALLEL_PROCESSES)
max_parallel_tests = _DEFAULT_S3_TEST_PARALLEL_PROCESSES
test_names = sorted(GetTestNames())
if list_tests and not self.args:
print 'Found %d test names:' % len(test_names)
print ' ', '\n '.join(sorted(test_names))
return 0
# Set list of commands to test if supplied.
if self.args:
commands_to_test = []
for name in self.args:
if name in test_names or name.split('.')[0] in test_names:
commands_to_test.append('gslib.tests.test_%s' % name)
else:
commands_to_test.append(name)
else:
commands_to_test = ['gslib.tests.test_%s' % name for name in test_names]
# Installs a ctrl-c handler that tries to cleanly tear down tests.
unittest.installHandler()
loader = unittest.TestLoader()
if commands_to_test:
try:
suite = loader.loadTestsFromNames(commands_to_test)
except (__HOLE__, AttributeError) as e:
raise CommandException('Invalid test argument name: %s' % e)
if list_tests:
test_names = GetTestNamesFromSuites(suite)
print 'Found %d test names:' % len(test_names)
print ' ', '\n '.join(sorted(test_names))
return 0
if logging.getLogger().getEffectiveLevel() <= logging.INFO:
verbosity = 1
else:
verbosity = 2
logging.disable(logging.ERROR)
if perform_coverage:
# We want to run coverage over the gslib module, but filter out the test
# modules and any third-party code. We also filter out anything under the
# temporary directory. Otherwise, the gsutil update test (which copies
# code to the temporary directory) gets included in the output.
coverage_controller = coverage.coverage(
source=['gslib'], omit=['gslib/third_party/*', 'gslib/tests/*',
tempfile.gettempdir() + '*'])
coverage_controller.erase()
coverage_controller.start()
num_parallel_failures = 0
sequential_success = False
(sequential_tests, isolated_tests,
parallel_unit_tests, parallel_integration_tests) = (
SplitParallelizableTestSuite(suite))
# Since parallel integration tests are run in a separate process, they
# won't get the override to tests.util, so skip them here.
if not tests.util.RUN_INTEGRATION_TESTS:
parallel_integration_tests = []
logging.debug('Sequential tests to run: %s', sequential_tests)
logging.debug('Isolated tests to run: %s', isolated_tests)
logging.debug('Parallel unit tests to run: %s', parallel_unit_tests)
logging.debug('Parallel integration tests to run: %s',
parallel_integration_tests)
# If we're running an already-isolated test (spawned in isolation by a
# previous test process), or we have no parallel tests to run,
# just run sequentially. For now, unit tests are always run sequentially.
run_tests_sequentially = (sequential_only or
(len(parallel_integration_tests) <= 1
and not isolated_tests))
if run_tests_sequentially:
total_tests = suite.countTestCases()
resultclass = MakeCustomTestResultClass(total_tests)
runner = unittest.TextTestRunner(verbosity=verbosity,
resultclass=resultclass,
failfast=failfast)
ret = runner.run(suite)
sequential_success = ret.wasSuccessful()
else:
if max_parallel_tests == 1:
# We can't take advantage of parallelism, though we may have tests that
# need isolation.
sequential_tests += parallel_integration_tests
parallel_integration_tests = []
sequential_start_time = time.time()
# TODO: For now, run unit tests sequentially because they are fast.
# We could potentially shave off several seconds of execution time
# by executing them in parallel with the integration tests.
if len(sequential_tests) + len(parallel_unit_tests):
print 'Running %d tests sequentially.' % (len(sequential_tests) +
len(parallel_unit_tests))
sequential_tests_to_run = sequential_tests + parallel_unit_tests
suite = loader.loadTestsFromNames(
sorted([test_name for test_name in sequential_tests_to_run]))
num_sequential_tests = suite.countTestCases()
resultclass = MakeCustomTestResultClass(num_sequential_tests)
runner = unittest.TextTestRunner(verbosity=verbosity,
resultclass=resultclass,
failfast=failfast)
ret = runner.run(suite)
sequential_success = ret.wasSuccessful()
else:
num_sequential_tests = 0
sequential_success = True
sequential_time_elapsed = time.time() - sequential_start_time
# At this point, all tests get their own process so just treat the
# isolated tests as parallel tests.
parallel_integration_tests += isolated_tests
num_parallel_tests = len(parallel_integration_tests)
if not num_parallel_tests:
pass
else:
num_processes = min(max_parallel_tests, num_parallel_tests)
if num_parallel_tests > 1 and max_parallel_tests > 1:
message = 'Running %d tests in parallel mode (%d processes).'
if num_processes > _DEFAULT_TEST_PARALLEL_PROCESSES:
message += (
' Please be patient while your CPU is incinerated. '
'If your machine becomes unresponsive, consider reducing '
'the amount of parallel test processes by running '
'\'gsutil test -p <num_processes>\'.')
print ('\n'.join(textwrap.wrap(
message % (num_parallel_tests, num_processes))))
else:
print ('Running %d tests sequentially in isolated processes.' %
num_parallel_tests)
(num_parallel_failures, parallel_time_elapsed) = self.RunParallelTests(
parallel_integration_tests, max_parallel_tests,
coverage_controller.data.filename if perform_coverage else None)
self.PrintTestResults(
num_sequential_tests, sequential_success,
sequential_time_elapsed,
num_parallel_tests, num_parallel_failures,
parallel_time_elapsed)
if perform_coverage:
coverage_controller.stop()
coverage_controller.combine()
coverage_controller.save()
print ('Coverage information was saved to: %s' %
coverage_controller.data.filename)
if sequential_success and not num_parallel_failures:
ResetFailureCount()
return 0
return 1
|
ImportError
|
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/commands/test.py/TestCommand.RunCommand
|
7,558
|
def _put(self, key, value, ttl_secs):
if ttl_secs in (NOT_SET, FOREVER):
# if we do not care about ttl, just use set
# in redis, using SET will also clear the timeout
# note that this assumes that there is no way in redis
# to set a default timeout on keys
self.redis.set(key, value)
else:
ittl = None
try:
ittl = int(ttl_secs)
except __HOLE__:
pass # let it blow up further down
if ittl == ttl_secs:
self.redis.setex(key, ittl, value)
else:
self.redis.psetex(key, int(ttl_secs * 1000), value)
return key
|
ValueError
|
dataset/ETHPy150Open mbr/simplekv/simplekv/memory/redisstore.py/RedisStore._put
|
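The hole in `RedisStore._put` guards `int(ttl_secs)`: a non-numeric TTL raises `ValueError`, leaving `ittl` unset so the comparison below routes the call to the millisecond branch (or lets it fail later). A sketch of the integral-vs-fractional dispatch without the Redis dependency; the command names merely mirror the SETEX/PSETEX split:
def choose_ttl_command(ttl_secs):
    """Return ('setex', secs) for whole-second TTLs, else ('psetex', millis)."""
    ittl = None
    try:
        ittl = int(ttl_secs)
    except ValueError:
        pass  # non-numeric TTL: fall through and let the caller fail later
    if ittl == ttl_secs:
        return ('setex', ittl)
    return ('psetex', int(ttl_secs * 1000))

assert choose_ttl_command(30) == ('setex', 30)
assert choose_ttl_command(0.25) == ('psetex', 250)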
7,559
|
def send(self, data, headers):
"""
Sends a request to a remote webserver using HTTP POST.
"""
req = urllib2.Request(self._url, headers=headers)
try:
response = urlopen(
url=req,
data=data,
timeout=self.timeout,
verify_ssl=self.verify_ssl,
ca_certs=self.ca_certs,
)
except urllib2.HTTPError as exc:
msg = exc.headers.get('x-sentry-error')
code = exc.getcode()
if code == 429:
try:
retry_after = int(exc.headers.get('retry-after'))
except (__HOLE__, TypeError):
retry_after = 0
raise RateLimited(msg, retry_after)
elif msg:
raise APIError(msg, code)
else:
raise
return response
|
ValueError
|
dataset/ETHPy150Open getsentry/raven-python/raven/transport/http.py/HTTPTransport.send
|
7,560
|
def execute(command, cwd=None, env=None):
try:
st = subprocess.PIPE
proc = subprocess.Popen(
args=command, stdout=st, stderr=st, stdin=st, cwd=cwd, env=env)
(output, error) = proc.communicate()
code = proc.returncode
return code, output, error
except __HOLE__ as error:
return -1, "", error
|
OSError
|
dataset/ETHPy150Open mozilla/pontoon/pontoon/sync/vcs/repositories.py/execute
|
7,561
|
@classmethod
def from_string(cls, string):
"""
Constructs a :class:`mw.Timestamp` from a MediaWiki formatted string.
        This method provides a convenient way to construct from common
MediaWiki timestamp formats. E.g., ``%Y%m%d%H%M%S`` and
``%Y-%m-%dT%H:%M:%SZ``.
:Parameters:
string : str
A formatted timestamp
:Returns:
:class:`mw.Timestamp`
"""
if type(string) == bytes:
string = str(string, 'utf8')
else:
string = str(string)
try:
return cls.strptime(string, SHORT_MW_TIME_STRING)
except ValueError as e:
try:
return cls.strptime(string, LONG_MW_TIME_STRING)
except __HOLE__ as e:
raise ValueError(
"{0} is not a valid Wikipedia date format".format(
repr(string)
)
)
return cls.from_time_struct(time_struct)
|
ValueError
|
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/types/timestamp.py/Timestamp.from_string
|
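`Timestamp.from_string` tries the short MediaWiki format first, falls back to the long one, and re-raises a clearer `ValueError` if both fail. The cascade is easy to reproduce with plain `time.strptime`; the format constants below only mirror, and are not, the mw module's own:
import time

SHORT_FORMAT = "%Y%m%d%H%M%S"          # e.g. 20150102030405
LONG_FORMAT = "%Y-%m-%dT%H:%M:%SZ"     # e.g. 2015-01-02T03:04:05Z

def parse_timestamp(string):
    """Try the short format first, then the long one, then give up."""
    try:
        return time.strptime(string, SHORT_FORMAT)
    except ValueError:
        try:
            return time.strptime(string, LONG_FORMAT)
        except ValueError:
            raise ValueError("%r matches neither timestamp format" % string)

assert parse_timestamp("20150102030405").tm_year == 2015
assert parse_timestamp("2015-01-02T03:04:05Z").tm_mday == 2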
7,562
|
def __eq__(self, other):
try:
return self.__time == other.__time
except __HOLE__:
return False
|
AttributeError
|
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/types/timestamp.py/Timestamp.__eq__
|
7,563
|
def __lt__(self, other):
try:
return self.__time < other.__time
except __HOLE__:
return NotImplemented
|
AttributeError
|
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/types/timestamp.py/Timestamp.__lt__
|
7,564
|
def __gt__(self, other):
try:
return self.__time > other.__time
except __HOLE__:
return NotImplemented
|
AttributeError
|
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/types/timestamp.py/Timestamp.__gt__
|
7,565
|
def __le__(self, other):
try:
return self.__time <= other.__time
except __HOLE__:
return NotImplemented
|
AttributeError
|
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/types/timestamp.py/Timestamp.__le__
|
7,566
|
def __ge__(self, other):
try:
return self.__time >= other.__time
except __HOLE__:
return NotImplemented
|
AttributeError
|
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/types/timestamp.py/Timestamp.__ge__
|
7,567
|
def __ne__(self, other):
try:
return not self.__time == other.__time
except __HOLE__:
return NotImplemented
|
AttributeError
|
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/types/timestamp.py/Timestamp.__ne__
|
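The six Timestamp rich comparisons above share one defensive pattern: touch the other object's private `__time` and, on `AttributeError`, return `False` for equality or `NotImplemented` for ordering so Python can try the reflected operation. A compact stand-alone version with an invented class:
class Seconds(object):
    def __init__(self, t):
        self.__t = t

    def __eq__(self, other):
        try:
            return self.__t == other.__t   # AttributeError for foreign types
        except AttributeError:
            return False

    def __lt__(self, other):
        try:
            return self.__t < other.__t
        except AttributeError:
            return NotImplemented          # let Python try the reflected op

assert Seconds(1) == Seconds(1)
assert (Seconds(1) == "one") is False
assert Seconds(1) < Seconds(2)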
7,568
|
def match(self, url, environ=None, sub_domains=False,
sub_domains_ignore=None, domain_match=''):
"""Match a url to our regexp.
While the regexp might match, this operation isn't
        guaranteed as there are other factors that can cause a match to
        fail even though the regexp succeeds (a default that was relied
        on wasn't given, a requirement regexp doesn't pass, etc.).
Therefore the calling function shouldn't assume this will
return a valid dict, the other possible return is False if a
match doesn't work out.
"""
# Static routes don't match, they generate only
if self.static:
return False
match = self.regmatch.match(url)
if not match:
return False
sub_domain = None
if sub_domains and environ and 'HTTP_HOST' in environ:
host = environ['HTTP_HOST'].split(':')[0]
sub_match = re.compile('^(.+?)\.%s$' % domain_match)
subdomain = re.sub(sub_match, r'\1', host)
if subdomain not in sub_domains_ignore and host != subdomain:
sub_domain = subdomain
if self.conditions:
if 'method' in self.conditions and environ and \
environ['REQUEST_METHOD'] not in self.conditions['method']:
return False
# Check sub-domains?
use_sd = self.conditions.get('sub_domain')
if use_sd and not sub_domain:
return False
elif not use_sd and 'sub_domain' in self.conditions and sub_domain:
return False
if isinstance(use_sd, list) and sub_domain not in use_sd:
return False
matchdict = match.groupdict()
result = {}
extras = self._default_keys - frozenset(matchdict.keys())
for key, val in six.iteritems(matchdict):
if key != 'path_info' and self.encoding:
# change back into python unicode objects from the URL
# representation
try:
val = as_unicode(val, self.encoding, self.decode_errors)
except __HOLE__:
return False
if not val and key in self.defaults and self.defaults[key]:
result[key] = self.defaults[key]
else:
result[key] = val
for key in extras:
result[key] = self.defaults[key]
# Add the sub-domain if there is one
if sub_domains:
result['sub_domain'] = sub_domain
# If there's a function, call it with environ and expire if it
# returns False
if self.conditions and 'function' in self.conditions and \
not self.conditions['function'](environ, result):
return False
return result
|
UnicodeDecodeError
|
dataset/ETHPy150Open bbangert/routes/routes/route.py/Route.match
|
7,569
|
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend in get_backends():
try:
user = backend.authenticate(**credentials)
except __HOLE__:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
return None
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__,
credentials=_clean_credentials(credentials))
|
TypeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/contrib/auth/__init__.py/authenticate
|
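Django's `authenticate` treats a `TypeError` from a backend as "these credentials are not meant for me" and moves on to the next backend. The dispatch can be mimicked with two toy backends; the class names and credential keywords are invented:
class TokenBackend(object):
    def authenticate(self, token=None):
        return "token-user" if token == "secret" else None

class PasswordBackend(object):
    def authenticate(self, username=None, password=None):
        return "pw-user" if password == "hunter2" else None

def authenticate(backends, **credentials):
    for backend in backends:
        try:
            user = backend.authenticate(**credentials)
        except TypeError:
            continue  # backend does not accept these keyword arguments
        if user is not None:
            return user
    return None

backends = [TokenBackend(), PasswordBackend()]
assert authenticate(backends, username="al", password="hunter2") == "pw-user"
assert authenticate(backends, token="secret") == "token-user"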
7,570
|
def get_user_model():
"""
Returns the User model that is active in this project.
"""
from django.db.models import get_model
try:
app_label, model_name = settings.AUTH_USER_MODEL.split('.')
except __HOLE__:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
user_model = get_model(app_label, model_name)
if user_model is None:
raise ImproperlyConfigured("AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL)
return user_model
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/contrib/auth/__init__.py/get_user_model
|
7,571
|
def get_user(request):
"""
Returns the user model instance associated with the given request session.
If no user is retrieved an instance of `AnonymousUser` is returned.
"""
from .models import AnonymousUser
try:
user_id = request.session[SESSION_KEY]
backend_path = request.session[BACKEND_SESSION_KEY]
assert backend_path in settings.AUTHENTICATION_BACKENDS
backend = load_backend(backend_path)
user = backend.get_user(user_id) or AnonymousUser()
except (__HOLE__, AssertionError):
user = AnonymousUser()
return user
|
KeyError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/contrib/auth/__init__.py/get_user
|
7,572
|
def test_read():
p = sp.Popen(args=['python', '-'], stdin=sp.PIPE, stdout=sp.PIPE)
p.stdin.write(script)
p.stdin.close()
f = stream.Reader(p.stdout)
result = list(zip(range(10), f))
p.kill()
j = 0
for i, buf in result:
assert i == j
assert len(buf) == f.bufsize
j += 1
try:
next(f)
except __HOLE__ as e:
assert e.args == ('timeout',)
|
IOError
|
dataset/ETHPy150Open romanz/amodem/amodem/tests/test_stream.py/test_read
|
7,573
|
def destroy_infra(databaseinfra, task=None):
try:
database = databaseinfra.databases.get()
LOG.debug('Database found! {}'.format(database))
except __HOLE__:
LOG.info("Database not found...")
if not databaseinfra.plan.provider == databaseinfra.plan.CLOUDSTACK:
LOG.error('Databaseinfra is not cloudstack infra')
return True
instances = []
hosts = []
for instance in databaseinfra.instances.all():
instances.append(instance)
hosts.append(instance.hostname)
workflow_dict = build_dict(plan=databaseinfra.plan,
environment=databaseinfra.environment,
steps=get_deploy_settings(
databaseinfra.plan.engine_type.name),
qt=get_vm_qt(plan=databaseinfra.plan),
dbtype=str(databaseinfra.plan.engine_type),
hosts=hosts,
instances=instances,
databaseinfra=databaseinfra,
database=database
)
if stop_workflow(workflow_dict=workflow_dict, task=task):
return workflow_dict
else:
return False
|
IndexError
|
dataset/ETHPy150Open globocom/database-as-a-service/dbaas/util/providers.py/destroy_infra
|
7,574
|
def get_current_stack():
call_stack = []
for line in pykd.dbgCommand("k").splitlines()[1:]:
try:
_, ret_addr, sym = line.split()
_ = int(ret_addr, 16)
except __HOLE__:
continue
call_stack.append(sym)
return call_stack
|
ValueError
|
dataset/ETHPy150Open blackberry/ALF/lib/dbgmin/scripts/WinDBGMemoryLimit.py/get_current_stack
|
7,575
|
def requested_mem_size():
possible_bp_syms = ["calloc", "malloc", "realloc"]
sym = None
for line in pykd.dbgCommand("kb").splitlines()[1:]:
try:
_, _, arg0, arg1, _, sym = line.split()
arg0 = int(arg0, 16)
arg1 = int(arg1, 16)
sym = sym.split("!")[1].strip()
except (ValueError, __HOLE__):
continue
if sym in possible_bp_syms:
break
sym = None
if sym == "calloc":
ret_val = arg0 * arg1
elif sym == "malloc":
ret_val = arg0
elif sym == "realloc":
ret_val = arg1
else:
ret_val = 0
return ret_val
|
IndexError
|
dataset/ETHPy150Open blackberry/ALF/lib/dbgmin/scripts/WinDBGMemoryLimit.py/requested_mem_size
|
7,576
|
@pytest.fixture(scope="module")
def server(request):
p1 = TProcessor(mux.ThingOneService, DispatcherOne())
p2 = TProcessor(mux.ThingTwoService, DispatcherTwo())
mux_proc = TMultiplexedProcessor()
mux_proc.register_processor("ThingOneService", p1)
mux_proc.register_processor("ThingTwoService", p2)
_server = TThreadedServer(mux_proc, TServerSocket(unix_socket=sock_path),
iprot_factory=TBinaryProtocolFactory(),
itrans_factory=TBufferedTransportFactory())
ps = multiprocessing.Process(target=_server.serve)
ps.start()
time.sleep(0.1)
def fin():
if ps.is_alive():
ps.terminate()
try:
os.remove(sock_path)
except __HOLE__:
pass
request.addfinalizer(fin)
|
IOError
|
dataset/ETHPy150Open eleme/thriftpy/tests/test_multiplexed.py/server
|
7,577
|
def del_context(self, key):
"""
Removes an element from the context dictionary.
Example
-------
>>> class FooView(ContextMixin):
... context = {
... 'foo': 'bar',
... }
...
>>> view = FooView()
>>> view.del_context('foo')
{}
Arguments
---------
key : str
The context key name
Returns
-------
bool
Success or Failure
Returns
-------
dict
The new context
"""
context = self.get_context()
try:
context.pop(key, None)
self._context = context
except __HOLE__:
return False
return context
|
KeyError
|
dataset/ETHPy150Open thisissoon/Flask-Velox/flask_velox/mixins/context.py/ContextMixin.del_context
|
7,578
|
def parse_duration(duration):
# (-)PnYnMnDTnHnMnS
index = 0
if duration[0] == '-':
sign = '-'
index += 1
else:
sign = '+'
assert duration[index] == "P"
index += 1
dic = dict([(typ, 0) for (code, typ) in D_FORMAT])
for code, typ in D_FORMAT:
# print duration[index:], code
if duration[index] == '-':
raise TimeUtilError("Negation not allowed on individual items")
if code == "T":
if duration[index] == "T":
index += 1
if index == len(duration):
raise TimeUtilError("Not allowed to end with 'T'")
else:
raise TimeUtilError("Missing T")
else:
try:
mod = duration[index:].index(code)
try:
dic[typ] = int(duration[index:index + mod])
except ValueError:
if code == "S":
try:
dic[typ] = float(duration[index:index + mod])
except ValueError:
raise TimeUtilError("Not a float")
else:
raise TimeUtilError(
"Fractions not allow on anything byt seconds")
index = mod + index + 1
except __HOLE__:
dic[typ] = 0
if index == len(duration):
break
return sign, dic
|
ValueError
|
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/time_util.py/parse_duration
|
7,579
|
def str_to_time(timestr, time_format=TIME_FORMAT):
"""
:param timestr:
:param time_format:
:return: UTC time
"""
if not timestr:
return 0
try:
then = time.strptime(timestr, time_format)
except __HOLE__: # assume it's a format problem
try:
elem = TIME_FORMAT_WITH_FRAGMENT.match(timestr)
except Exception as exc:
print >> sys.stderr, "Exception: %s on %s" % (exc, timestr)
raise
then = time.strptime(elem.groups()[0] + "Z", TIME_FORMAT)
return time.gmtime(calendar.timegm(then))
|
ValueError
|
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/time_util.py/str_to_time
|
7,580
|
def authenticate(self, code=None, **credentials):
try:
user = get_user_model().objects.get(**credentials)
if not self.verify_user(user):
return None
if code is None:
return LoginCode.create_code_for_user(user)
else:
timeout = getattr(settings, 'NOPASSWORD_LOGIN_CODE_TIMEOUT', 900)
timestamp = datetime.now() - timedelta(seconds=timeout)
login_code = LoginCode.objects.get(user=user, code=code, timestamp__gt=timestamp)
user = login_code.user
user.code = login_code
login_code.delete()
return user
except (__HOLE__, get_user_model().DoesNotExist, LoginCode.DoesNotExist, FieldError):
return None
|
TypeError
|
dataset/ETHPy150Open relekang/django-nopassword/nopassword/backends/base.py/NoPasswordBackend.authenticate
|
7,581
|
def get_covar(self):
try:
return self.Cache['invL']
except __HOLE__:
self.Cache['invL'] = np.linalg.inv( self.L )
return self.Cache['invL']
|
KeyError
|
dataset/ETHPy150Open daeilkim/refinery/refinery/bnpy/bnpy-dev/bnpy/distr/GaussDistr.py/GaussDistr.get_covar
|
7,582
|
def cholL(self):
try:
return self.Cache['cholL']
except __HOLE__:
self.Cache['cholL'] = scipy.linalg.cholesky(self.L) #UPPER by default
return self.Cache['cholL']
|
KeyError
|
dataset/ETHPy150Open daeilkim/refinery/refinery/bnpy/bnpy-dev/bnpy/distr/GaussDistr.py/GaussDistr.cholL
|
7,583
|
def logdetL(self):
try:
return self.Cache['logdetL']
except __HOLE__:
logdetL = 2.0*np.sum( np.log( np.diag( self.cholL() ) ) )
self.Cache['logdetL'] =logdetL
return logdetL
######################################################### I/O Utils
#########################################################
|
KeyError
|
dataset/ETHPy150Open daeilkim/refinery/refinery/bnpy/bnpy-dev/bnpy/distr/GaussDistr.py/GaussDistr.logdetL
|
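The three `GaussDistr` accessors above memoize an expensive linear-algebra result in `self.Cache`, computing it only when the first lookup raises `KeyError`. Stripped of the numpy specifics, the caching idiom looks like this sketch:
class CachedSquares(object):
    def __init__(self):
        self.Cache = {}

    def square(self, x):
        try:
            return self.Cache[x]          # hit: return the memoized value
        except KeyError:
            self.Cache[x] = x * x         # miss: compute once, then store
            return self.Cache[x]

c = CachedSquares()
assert c.square(4) == 16
assert c.Cache == {4: 16}
assert c.square(4) == 16                  # second call served from the cache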
7,584
|
def load_asset(self, fname):
""" Get the asset corresponding to the given name.
Parameters:
fname (str): the (relative) filename for the asset.
Returns:
asset (bytes): the asset content.
"""
try:
content = self._assets[fname]
except __HOLE__:
raise IndexError('Asset %r not known.' % fname)
if lookslikeafilename(content):
return self._cache_get(content)
else:
return content
|
KeyError
|
dataset/ETHPy150Open zoofIO/flexx/flexx/app/assetstore.py/AssetStore.load_asset
|
7,585
|
def get_random_string(length=24, allowed_chars=None):
""" Produce a securely generated random string.
With a length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
allowed_chars = allowed_chars or ('abcdefghijklmnopqrstuvwxyz' +
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
try:
srandom = random.SystemRandom()
except __HOLE__:
srandom = random
logging.warn('Falling back to less secure Mersenne Twister random string.')
bogus = "%s%s%s" % (random.getstate(), time.time(), 'sdkhfbsdkfbsdbhf')
random.seed(hashlib.sha256(bogus.encode()).digest())
return ''.join(srandom.choice(allowed_chars) for i in range(length))
|
NotImplementedError
|
dataset/ETHPy150Open zoofIO/flexx/flexx/app/assetstore.py/get_random_string
|
7,586
|
def _record_items(self, lib, basename, items):
"""Records relative paths to the given items for each feed format
"""
feedsdir = bytestring_path(self.config['dir'].as_filename())
formats = self.config['formats'].as_str_seq()
relative_to = self.config['relative_to'].get() \
or self.config['dir'].as_filename()
relative_to = bytestring_path(relative_to)
paths = []
for item in items:
if self.config['absolute_path']:
paths.append(item.path)
else:
try:
relpath = os.path.relpath(item.path, relative_to)
except __HOLE__:
# On Windows, it is sometimes not possible to construct a
# relative path (if the files are on different disks).
relpath = item.path
paths.append(relpath)
if 'm3u' in formats:
m3u_basename = bytestring_path(
self.config['m3u_name'].get(unicode))
m3u_path = os.path.join(feedsdir, m3u_basename)
_write_m3u(m3u_path, paths)
if 'm3u_multi' in formats:
m3u_path = _build_m3u_filename(basename)
_write_m3u(m3u_path, paths)
if 'link' in formats:
for path in paths:
dest = os.path.join(feedsdir, os.path.basename(path))
if not os.path.exists(syspath(dest)):
os.symlink(syspath(path), syspath(dest))
if 'echo' in formats:
self._log.info(u"Location of imported music:")
for path in paths:
self._log.info(u" {0}", path)
|
ValueError
|
dataset/ETHPy150Open beetbox/beets/beetsplug/importfeeds.py/ImportFeedsPlugin._record_items
|
7,587
|
def _process_result(self, result):
self._api.limit_remaining = None
self._api.limit = None
self._api.limit_reset = None
if not self._spec.streaming:
try:
self._api.limit_remaining = int(result.headers["X-Rate-Limit-Remaining"])
self._api.limit = int(result.headers["X-Rate-Limit-Limit"])
self._api.limit_reset = datetime.datetime.utcfromtimestamp(float(result.headers["X-Rate-Limit-Reset"]))
except __HOLE__: # Ignore any errors from trying to get headers
pass
result = result.json()
elif result.strip():
# Check that the result is not empty.
result = json.loads(result)
else:
return None
if isinstance(result, dict) and "previous_cursor" in result:
return models.ResultsPage(result, self._api)
if self._spec.model == None:
return result
else:
if isinstance(result, list):
return [self._spec.model(r, self._api) for r in result]
else:
return self._spec.model(result, self._api)
|
KeyError
|
dataset/ETHPy150Open domrout/python-twitter-wrapper/twitterwrapper/_api.py/ApiMethod._process_result
|
7,588
|
def __init__(self, source, width, height, dest=None, proc=None, *args,
**kwargs):
self.source = source
self.width = width
self.height = height
self.processor = get_processor(proc)(*args, **kwargs)
if dest is None:
dest = build_thumbnail_name(source, width, height, self.processor)
self.dest = dest
self.cache_dir = getattr(settings, 'CUDDLYBUDDLY_THUMBNAIL_CACHE', None)
for var in ('width', 'height'):
try:
setattr(self, var, int(getattr(self, var)))
except __HOLE__:
raise ThumbnailException('Value supplied for \'%s\' is not an int' % var)
if self.processor is None:
raise ThumbnailException('There is no image processor available')
self.generate()
|
ValueError
|
dataset/ETHPy150Open kylemacfarlane/django-cb-thumbnail/src/cuddlybuddly/thumbnail/main.py/Thumbnail.__init__
|
7,589
|
def generate(self):
if hasattr(self.dest, 'write'):
self._do_generate()
else:
do_generate = False
if self.cache_dir is not None:
if isinstance(self.source, FieldFile) or \
isinstance(self.source, File):
source = force_unicode(self.source)
elif not isinstance(self.source, basestring):
source = pickle.dumps(self.source.read())
self.source.seek(0)
else:
source = smart_str(force_unicode(self.source))
source = os.path.join(self.cache_dir,
hashlib.md5(source).hexdigest())
if not os.path.exists(source):
path = os.path.split(source)[0]
if not os.path.exists(path):
os.makedirs(path)
open(source, 'w').close()
if not isinstance(self.dest, basestring):
dest = pickle.dumps(self.dest.read())
self.dest.seek(0)
else:
dest = smart_str(force_unicode(self.dest))
dest = os.path.join(self.cache_dir,
hashlib.md5(dest).hexdigest())
else:
source = force_unicode(self.source)
dest = self.dest
if hasattr(default_storage, 'modified_time') and not self.cache_dir:
try:
source_mod_time = default_storage.modified_time(source)
except EnvironmentError:
# Means the source file doesn't exist, so nothing can be
# done.
do_generate = False
else:
try:
dest_mod_time = default_storage.modified_time(dest)
except EnvironmentError:
# Means the destination file doesn't exist so it must be
# generated.
do_generate = True
else:
do_generate = source_mod_time > dest_mod_time
else:
if not self.cache_dir:
source_cache = os.path.join(settings.MEDIA_ROOT, source)
dest_cache = os.path.join(settings.MEDIA_ROOT, dest)
else:
source_cache, dest_cache = source, dest
try:
do_generate = os.path.getmtime(source_cache) > \
os.path.getmtime(dest_cache)
except __HOLE__:
do_generate = True
if do_generate:
if self.cache_dir is not None:
path = os.path.split(dest)[0]
if not os.path.exists(path):
os.makedirs(path)
open(dest, 'w').close()
try:
self._do_generate()
except:
if self.cache_dir is not None:
if os.path.exists(dest):
os.remove(dest)
raise
|
OSError
|
dataset/ETHPy150Open kylemacfarlane/django-cb-thumbnail/src/cuddlybuddly/thumbnail/main.py/Thumbnail.generate
|
7,590
|
def _do_generate(self):
if isinstance(self.source, Image.Image):
data = self.source
else:
try:
if not hasattr(self.source, 'readline'):
if not hasattr(self.source, 'read'):
source = force_unicode(self.source)
if not default_storage.exists(source):
raise ThumbnailException('Source does not exist: %s'
% self.source)
file = default_storage.open(source, 'rb')
content = ContentFile(file.read())
file.close()
else:
content = ContentFile(self.source.read())
else:
content = ContentFile(self.source.read())
data = Image.open(content)
except IOError, detail:
raise ThumbnailException('%s: %s' % (detail, self.source))
except MemoryError:
raise ThumbnailException('Memory Error: %s' % self.source)
filelike = hasattr(self.dest, 'write')
if not filelike:
dest = StringIO()
else:
dest = self.dest
data = self.processor.generate_thumbnail(data, self.width, self.height)
filename = force_unicode(self.dest)
try:
data.save(dest, optimize=1, **self.processor.get_save_options(filename, data))
except IOError:
# Try again, without optimization (PIL can't optimize an image
# larger than ImageFile.MAXBLOCK, which is 64k by default)
try:
data.save(dest, **self.processor.get_save_options(filename, data))
except __HOLE__, e:
raise ThumbnailException(e)
if hasattr(self.source, 'seek'):
self.source.seek(0)
if filelike:
dest.seek(0)
else:
if default_storage.exists(filename):
default_storage.delete(filename)
default_storage.save(filename, ContentFile(dest.getvalue()))
dest.close()
|
IOError
|
dataset/ETHPy150Open kylemacfarlane/django-cb-thumbnail/src/cuddlybuddly/thumbnail/main.py/Thumbnail._do_generate
|
7,591
|
def new(self, collection, report=None, properties=None, *args, **kwargs):
"""Returns a fresh class instance for a new entity.
ac = t1.new('atomic_creative') OR
ac = t1.new('atomic_creatives') OR even
ac = t1.new(terminalone.models.AtomicCreative)
"""
if type(collection) == type and issubclass(collection, Entity):
ret = collection
elif '_acl' in collection:
ret = ACL
else:
try:
ret = SINGULAR[collection]
except __HOLE__:
ret = CLASSES[collection]
if ret == Report:
return ret(self.session,
report=report,
environment=self.environment,
api_base=self.api_base,
**kwargs)
return ret(self.session,
environment=self.environment,
api_base=self.api_base,
properties=properties,
json=self.json,
*args, **kwargs)
|
KeyError
|
dataset/ETHPy150Open MediaMath/t1-python/terminalone/service.py/T1.new
|
7,592
|
@staticmethod
def _construct_url(collection, entity, child, limit):
"""Construct URL"""
url = [collection, ]
if entity is not None:
url.append(str(entity)) # str so that we can use join
child_id = None
if child is not None:
try:
child_path = CHILD_PATHS[child.lower()]
except AttributeError:
raise ClientError("`child` must be a string of the entity to retrieve")
except __HOLE__:
raise ClientError("`child` must correspond to an entity in T1")
# child_path should always be a tuple of (path, id). For children
# that do not have IDs, like concepts and permissions, ID is 0
if child_path[1]:
child_id = child_path[1]
url.append(child_path[0])
# All values need to be strings for join
url.append(str(child_path[1]))
else:
url.append(child_path[0])
if isinstance(limit, dict):
if len(limit) != 1:
raise ClientError('Limit must consist of one parent collection '
'(or chained parent collection) and a single '
'value for it (e.g. {"advertiser": 1}, or '
'{"advertiser.agency": 2)')
url.extend(['limit',
'{0!s}={1:d}'.format(*next(six.iteritems(limit)))])
return '/'.join(url), child_id
|
KeyError
|
dataset/ETHPy150Open MediaMath/t1-python/terminalone/service.py/T1._construct_url
|
7,593
|
def do_upgrade(env, version, cursor):
"""Move attachments from the `attachments` directory into `files`, hashing
the filenames in the process."""
path = env.path
old_dir = os.path.join(path, 'attachments')
if not os.path.exists(old_dir):
return
old_stat = os.stat(old_dir)
new_dir = os.path.join(path, 'files', 'attachments')
if not os.path.exists(new_dir):
os.makedirs(new_dir)
cursor.execute("""
SELECT type, id, filename FROM attachment ORDER BY type, id
""")
for row in cursor:
move_attachment_file(env, *row)
# Try to preserve permissions and ownerships of the attachments
# directory for $ENV/files
for dir, dirs, files in os.walk(os.path.join(path, 'files')):
try:
if hasattr(os, 'chmod'):
os.chmod(dir, old_stat.st_mode)
if hasattr(os, 'chflags') and hasattr(old_stat, 'st_flags'):
os.chflags(dir, old_stat.st_flags)
if hasattr(os, 'chown'):
os.chown(dir, old_stat.st_uid, old_stat.st_gid)
except __HOLE__:
pass
# Remove empty directory hierarchy
try:
for dir, dirs, files in os.walk(old_dir, topdown=False):
os.rmdir(dir)
except OSError as e:
env.log.warning("Can't delete old attachments directory %s: %s",
old_dir, exception_to_unicode(e))
# TRANSLATOR: Wrap message to 80 columns
printerr(_("""\
The upgrade of attachments was successful, but the old attachments directory:
%(src_dir)s
couldn't be removed, possibly due to the presence of files that weren't
referenced in the database. The error was:
%(exception)s
This error can be ignored, but for keeping your environment clean you should
backup any remaining files in that directory and remove it manually.
""", src_dir=old_dir, exception=exception_to_unicode(e)))
|
OSError
|
dataset/ETHPy150Open edgewall/trac/trac/upgrades/db28.py/do_upgrade
|
7,594
|
def move_attachment_file(env, parent_realm, parent_id, filename):
old_path = os.path.join(env.path, 'attachments', parent_realm,
unicode_quote(parent_id))
if filename:
old_path = os.path.join(old_path, unicode_quote(filename))
old_path = os.path.normpath(old_path)
if os.path.isfile(old_path):
new_path = Attachment._get_path(env.path, parent_realm, parent_id,
filename)
try:
os.renames(old_path, new_path)
except __HOLE__:
printerr(_("Unable to move attachment from:\n\n"
" %(old_path)s\n\nto:\n\n %(new_path)s\n",
old_path=old_path, new_path=new_path))
raise
else:
env.log.warning("Can't find file for 'attachment:%s:%s:%s', ignoring",
filename, parent_realm, parent_id)
|
OSError
|
dataset/ETHPy150Open edgewall/trac/trac/upgrades/db28.py/move_attachment_file
|
7,595
|
def get_theme(request):
this_theme = hz_themes.get_default_theme()
try:
theme = request.COOKIES[hz_themes.get_theme_cookie_name()]
for each_theme in hz_themes.get_themes():
if theme == each_theme[0]:
this_theme = each_theme[0]
except __HOLE__:
pass
return this_theme
|
KeyError
|
dataset/ETHPy150Open openstack/horizon/openstack_dashboard/templatetags/themes.py/get_theme
|
7,596
|
def expand_sv_ends(rec):
''' assign start and end positions to SV calls using conf. intervals if present '''
startpos, endpos = rec.start, rec.end
assert rec.is_sv
try:
endpos = int(rec.INFO.get('END')[0])
if rec.INFO.get('CIPOS'):
ci = map(int, rec.INFO.get('CIPOS'))
if ci[0] < 0:
startpos += ci[0]
if rec.INFO.get('CIEND'):
ci = map(int, rec.INFO.get('CIEND'))
if ci[0] > 0:
endpos += ci[0]
except __HOLE__ as e:
sys.stderr.write("error expanding sv interval: " + str(e) + " for record: " + str(rec) + "\n")
if startpos > endpos:
endpos, startpos = startpos, endpos
return startpos, endpos
|
TypeError
|
dataset/ETHPy150Open adamewing/bamsurgeon/scripts/evaluator.py/expand_sv_ends
|
7,597
|
def have_identical_haplotypes(v1, v2, ref):
"""Check if two variant produce the same haplotype / variant sequence.
- v1 and v2: PyVCF variants to compare
- ref: PySAM FastaFile
"""
assert (v1.is_indel or v1.is_snp) and (v2.is_indel or v2.is_snp)
if v1.CHROM != v2.CHROM:
return False
if v1.is_snp and v2.is_snp:
assert v1.REF.upper() == v2.REF.upper()
return str(v1.ALT[0]).upper() == str(v2.ALT[0]).upper()
if v1.is_snp or v2.is_snp:
# one snp one indel: can't produce identical results
return False
assert v1.is_indel and v2.is_indel
    # only one allele per variant allowed
    assert len(v1.ALT) == 1 and len(v2.ALT) == 1, (
        "Can't handle multi-allelic entries")
    # get the sequence context which fully overlaps both variants.
# note: pyvcf is one-based, but start and end are zero-based half-open
start = min([v1.POS, v2.POS])-1
end = max([v1.POS + max([len(v1.REF), len(v1.ALT[0])]),
v2.POS + max([len(v2.REF), len(v2.ALT[0])])
])
    chrom = v1.CHROM  # verified above that the chromosomes are identical
seq = list(ref.fetch(chrom, start, end).upper())
if len(seq) != end-start:
# FIXME how to handle?
sys.stderr.write("WARN: Couldn't fetch full sequence window. Skipping"
" allele-aware comparison, otherwise indices would"
" be off\n")
raise NotImplementedError
v1_offset = v1.POS-1-start
v2_offset = v2.POS-1-start
    # lower() in the replacement is for debugging purposes only
v1_seq = seq[:v1_offset] + list(str(v1.ALT[0]).lower()) + seq[v1_offset+len(v1.REF):]
v2_seq = seq[:v2_offset] + list(str(v2.ALT[0]).lower()) + seq[v2_offset+len(v2.REF):]
if False:
print "reference sequence context\t%s" % (''.join(seq))
print "v1 (offset %d) %s\t%s" % (v1_offset, v1, ''.join(v1_seq))
print "v2 (offset %d) %s\t%s" % (v2_offset, v2, ''.join(v2_seq))
print
try:
assert seq[v1_offset] == v1.REF[0].upper()
assert seq[v2_offset] == v2.REF[0].upper()
assert len(v1_seq) == len(seq) - len(v1.REF) + len(v1.ALT[0])
assert len(v2_seq) == len(seq) - len(v2.REF) + len(v2.ALT[0])
except __HOLE__:
#import pdb; pdb.set_trace()
raise
#if ''.join(v1_seq).upper() == ''.join(v2_seq).upper():
# print ''.join(v1_seq).upper()
return ''.join(v1_seq).upper() == ''.join(v2_seq).upper()
|
AssertionError
|
dataset/ETHPy150Open adamewing/bamsurgeon/scripts/evaluator.py/have_identical_haplotypes
|
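The function above exists because the same indel can be written at different VCF positions yet spell out an identical haplotype, which happens routinely in repeat runs. A self-contained illustration of the splice it performs, using plain strings instead of PyVCF records and a pysam reference (positions are 1-based within the hypothetical window "CAAAT"):

def splice(window, pos, ref, alt, window_start=1):
    """Apply a REF -> ALT replacement at 1-based pos inside a reference window."""
    off = pos - window_start
    return window[:off] + alt + window[off + len(ref):]

window = "CAAAT"
# Two different representations of "delete one A from the run":
hap1 = splice(window, 1, "CA", "C")   # POS=1 REF=CA ALT=C
hap2 = splice(window, 2, "AA", "A")   # POS=2 REF=AA ALT=A
assert hap1 == hap2 == "CAAT"         # identical haplotypes, so the two calls should match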
7,598
|
def evaluate(submission, truth, vtype='SNV', reffa=None, ignorechroms=None, ignorepass=False,
fp_vcf=None, fn_vcf=None, tp_vcf=None,
debug=False):
''' return stats on sensitivity, specificity, balanced accuracy '''
assert vtype in ('SNV', 'SV', 'INDEL')
subvcfh = vcf.Reader(filename=submission)
truvcfh = vcf.Reader(filename=truth)
fpvcfh = fnvcfh = tpvcfh = None
if fp_vcf:
fpvcfh = vcf.Writer(open(fp_vcf, 'w'), template=subvcfh)
if fn_vcf:
fnvcfh = vcf.Writer(open(fn_vcf, 'w'), template=subvcfh)
if tp_vcf:
tpvcfh = vcf.Writer(open(tp_vcf, 'w'), template=subvcfh)
reffa_fh = None
if reffa:
reffa_fh = pysam.Fastafile(reffa)
if debug:
print "DEBUG: Using haplotype aware indel comparison"
tpcount = 0
fpcount = 0
subrecs = 0
trurecs = 0
truchroms = {}
fns = OrderedDict()
''' count records in truth vcf, track contigs/chromosomes '''
for trurec in truvcfh:
if relevant(trurec, vtype, ignorechroms):
trurecs += 1
truchroms[trurec.CHROM] = True
fns[str(trurec)] = trurec
used_truth = {} # keep track of 'truth' sites used, they should only be usable once
''' parse submission vcf, compare to truth '''
for subrec in subvcfh:
if passfilter(subrec, disabled=ignorepass):
if subrec.is_snp and vtype == 'SNV':
if not svmask(subrec, truvcfh, truchroms):
subrecs += 1
if subrec.is_sv and vtype == 'SV':
subrecs += 1
if subrec.is_indel and vtype == 'INDEL':
subrecs += 1
matched = False
startpos, endpos = subrec.start, subrec.end
if vtype == 'SV' and subrec.is_sv:
startpos, endpos = expand_sv_ends(subrec)
try:
if relevant(subrec, vtype, ignorechroms) and passfilter(subrec, disabled=ignorepass) and subrec.CHROM in truchroms:
for trurec in truvcfh.fetch(subrec.CHROM, startpos, end=endpos):
if match(subrec, trurec, vtype=vtype) and str(trurec) not in used_truth:
matched = True
                if not matched and subrec.is_indel and reffa_fh:  # try haplotype aware comparison
window = 100
for (trurec, _) in get_close_matches(subrec, truvcfh, window, indels_only=True):
if str(trurec) in used_truth:
continue
if have_identical_haplotypes(subrec, trurec, reffa_fh):
matched = True
if debug:
print "DEBUG: Rescuing %s which has same haplotype as %s" % (subrec, trurec)
break
if matched:
used_truth[str(trurec)] = True
except __HOLE__ as e:
sys.stderr.write("Warning: " + str(e) + "\n")
if matched:
tpcount += 1
if tpvcfh:
tpvcfh.write_record(subrec)
if fns.has_key(str(trurec)):
del fns[str(trurec)]
else:
if relevant(subrec, vtype, ignorechroms) and passfilter(subrec, disabled=ignorepass) and not svmask(subrec, truvcfh, truchroms):
fpcount += 1 # FP counting method needs to change for real tumors
if fpvcfh:
fpvcfh.write_record(subrec)
if fnvcfh:
for fn in fns.values():
fnvcfh.write_record(fn)
print "tpcount, fpcount, subrecs, trurecs:"
print tpcount, fpcount, subrecs, trurecs
recall = float(tpcount) / float(trurecs)
if tpcount+fpcount > 0:
precision = float(tpcount) / float(tpcount + fpcount)
else:
precision = 0.0
#fdr = 1.0 - float(fpcount) / float(subrecs)
f1score = 0.0 if tpcount == 0 else 2.0*(precision*recall)/(precision+recall)
for fh in [fpvcfh, fnvcfh, tpvcfh]:
if fh:
fh.close()
return precision, recall, f1score
|
ValueError
|
dataset/ETHPy150Open adamewing/bamsurgeon/scripts/evaluator.py/evaluate
|
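The closing arithmetic of evaluate() is the standard precision / recall / F1 computation; a worked example with hypothetical counts (80 true positives, 40 false positives, 100 records in the truth set):

tpcount, fpcount, trurecs = 80, 40, 100                       # hypothetical counts
recall = float(tpcount) / float(trurecs)                      # 0.80
precision = float(tpcount) / float(tpcount + fpcount)         # ~0.667
f1score = 2.0 * (precision * recall) / (precision + recall)   # 8/11 ~ 0.727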
7,599
|
def getUpToVersion():
grab_next = False
for arg in sys.argv:
if arg.strip() == "--to":
grab_next = True
elif arg.startswith("--to=") or grab_next == True:
try:
to_version = int(arg.replace("--to=",""))
print "~ Migrating to version: %(tv)s" % {'tv': to_version}
return to_version
except __HOLE__:
print "~ ERROR: unable to parse --to argument: '%(ta)s'" % { 'ta': arg }
return None
return None
# ~~~~~~~~~~~~~~~~~~~~~~ getVersion(dbname) is to look up the version number of the database
|
TypeError
|
dataset/ETHPy150Open eBay/restcommander/play-1.2.4/modules/migrate-1.4/commands.py/getUpToVersion
|