code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
"""
AIM class to credit card payment with authorize.net
Fork of authnet code written by John Conde
http://www.johnconde.net/blog/integrate-the-authorizenet-aim-api-with-python-3-2/
BSDv3 License
Modified by Massimo Di Pierro
- ported from Python 3.x to run on Python 2.4+
- fixed a couple of bugs
- merged with test so single file
- namedtuple from http://code.activestate.com/recipes/500261/
"""
__all__ = ['AIM']

from operator import itemgetter
import urllib  # Python 2 urllib: urlencode/urlopen/FancyURLopener are used in AIM.process()

# Cache of generated named-tuple classes, keyed by their field-name tuple
# (shared by the namedtuple() factory below).
_known_tuple_types = {}
class NamedTupleBase(tuple):
    """Base class for named tuples with the __new__ operator set, named tuples
    yielded by the namedtuple() function will subclass this and add
    properties."""

    def __new__(cls, *args, **kws):
        """Create a new instance of this fielded tuple.

        Field values may be given positionally and/or by keyword; fields
        that are not supplied are padded with None.
        """
        # May need to unpack named field values here
        if kws:
            # Pad the positional values out to the full field count.
            values = list(args) + [None] * (len(cls._fields) - len(args))
            fields = dict((val, idx) for idx, val in enumerate(cls._fields))
            # .items() (not .iteritems()) keeps this working on both
            # Python 2 and 3; iteration results are identical.
            for kw, val in kws.items():
                # Bug fix: the original asserted `kw in kws`, which is
                # trivially true while iterating kws -- an unknown keyword
                # fell through to a bare KeyError below.  Validate against
                # the declared field list instead.
                assert kw in fields, "%r not in field list" % kw
                values[fields[kw]] = val
            args = tuple(values)
        return tuple.__new__(cls, args)
def namedtuple(typename, fieldnames):
    """Create (and cache) a tuple subclass whose elements can also be read
    as named attributes.

    ``fieldnames`` may be a comma/space separated string or any iterable of
    names.  Generated classes are cached in the module-level
    ``_known_tuple_types`` by field tuple, so the same field list always
    yields the same class.

    NOTE(review): ``basestring`` makes this Python 2 only.

    Bug fix (docstring only): the original doctest imported a nonexistent
    ``namedtuples`` module and called this two-argument factory with a
    single argument; the examples below are runnable.

    >>> tpl = namedtuple('tpl', ['a', 'b', 'c'])
    >>> tpl(1, 2, 3)
    (1, 2, 3)
    >>> tpl(1, 2, 3).b
    2
    >>> tpl(c=1, a=2, b=3)
    (2, 3, 1)
    >>> tpl(c=1, a=2, b=3).b
    3
    >>> tpl(c='pads with nones')
    (None, None, 'pads with nones')
    >>> tpl(b='pads with nones')
    (None, 'pads with nones', None)
    """
    # Split up a string, some people do this
    if isinstance(fieldnames, basestring):
        fieldnames = fieldnames.replace(',', ' ').split()
    # Convert anything iterable that enumerates fields to a tuple now
    fieldname_tuple = tuple(str(field) for field in fieldnames)
    # See if we've cached this
    if fieldname_tuple in _known_tuple_types:
        return _known_tuple_types[fieldname_tuple]
    # Make the type
    new_tuple_type = type(typename, (NamedTupleBase,), {})
    # Set the hidden field
    new_tuple_type._fields = fieldname_tuple
    # Add the getters
    for i, field in enumerate(fieldname_tuple):
        setattr(new_tuple_type, field, property(itemgetter(i)))
    # Cache
    _known_tuple_types[fieldname_tuple] = new_tuple_type
    # Done
    return new_tuple_type
class AIM:
    """Client for the authorize.net AIM (Advanced Integration Method)
    credit-card gateway.

    Typical use::

        payment = AIM(login, transkey, testmode)
        payment.setTransaction(cc_number, expiration, total)
        payment.process()
        if payment.isApproved(): ...
    """

    class AIMError(Exception):
        """Raised for invalid arguments and for gateway-reported failures."""

        def __init__(self, value):
            self.parameter = value

        def __str__(self):
            return str(self.parameter)

    def __init__(self, login, transkey, testmode=False):
        """Store credentials and seed the fixed AIM request parameters.

        Raises AIMError if login/transkey are missing or testmode is not
        a boolean.
        """
        # Bug fix: check for None *before* str()-ifying -- str(None) is the
        # non-empty string 'None', so the original order let None through.
        if login is None or str(login).strip() == '':
            raise AIM.AIMError('No login name provided')
        if transkey is None or str(transkey).strip() == '':
            raise AIM.AIMError('No transaction key provided')
        if testmode != True and testmode != False:
            raise AIM.AIMError('Invalid value for testmode. Must be True or False. "{0}" given.'.format(testmode))
        self.testmode = testmode
        self.proxy = None
        self.delimiter = '|'
        self.results = []
        # Outcome flags; error stays True until process() succeeds.
        self.error = True
        self.success = False
        self.declined = False
        self.parameters = {}
        self.setParameter('x_delim_data', 'true')
        self.setParameter('x_delim_char', self.delimiter)
        self.setParameter('x_relay_response', 'FALSE')
        self.setParameter('x_url', 'FALSE')
        self.setParameter('x_version', '3.1')
        self.setParameter('x_method', 'CC')
        self.setParameter('x_type', 'AUTH_CAPTURE')
        self.setParameter('x_login', login)
        self.setParameter('x_tran_key', transkey)

    def process(self):
        """POST the accumulated parameters to authorize.net and parse the
        pipe-delimited response into ``self.response`` (a named tuple).

        Raises AIMError when the gateway reports an error.
        """
        # Bug fix: reset results so a second process() call does not parse
        # stale fields accumulated from the previous response.
        self.results = []
        encoded_args = urllib.urlencode(self.parameters)
        if self.testmode == True:
            url = 'https://test.authorize.net/gateway/transact.dll'
        else:
            url = 'https://secure.authorize.net/gateway/transact.dll'
        if self.proxy is None:
            self.results += str(urllib.urlopen(
                url, encoded_args).read()).split(self.delimiter)
        else:
            opener = urllib.FancyURLopener(self.proxy)
            opened = opener.open(url, encoded_args)
            try:
                self.results += str(opened.read()).split(self.delimiter)
            finally:
                opened.close()
        Results = namedtuple('Results', 'ResultResponse ResponseSubcode ResponseCode ResponseText AuthCode \
                             AVSResponse TransactionID InvoiceNumber Description Amount PaymentMethod \
                             TransactionType CustomerID CHFirstName CHLastName Company BillingAddress \
                             BillingCity BillingState BillingZip BillingCountry Phone Fax Email ShippingFirstName \
                             ShippingLastName ShippingCompany ShippingAddress ShippingCity ShippingState \
                             ShippingZip ShippingCountry TaxAmount DutyAmount FreightAmount TaxExemptFlag \
                             PONumber MD5Hash CVVResponse CAVVResponse')
        # Only the first 40 delimited fields are part of the fixed layout.
        self.response = Results(*tuple(r for r in self.results)[0:40])
        if self.getResultResponseFull() == 'Approved':
            self.error = False
            self.success = True
            self.declined = False
        elif self.getResultResponseFull() == 'Declined':
            self.error = False
            self.success = False
            self.declined = True
        else:
            raise AIM.AIMError(self.response.ResponseText)

    def setTransaction(self, creditcard, expiration, total, cvv=None, tax=None, invoice=None):
        """Set the card number, expiration and amount (plus optional cvv,
        tax and invoice number) for the pending transaction."""
        # Same None-before-str() ordering fix as in __init__.
        if creditcard is None or str(creditcard).strip() == '':
            raise AIM.AIMError('No credit card number passed to setTransaction(): {0}'.format(creditcard))
        if expiration is None or str(expiration).strip() == '':
            raise AIM.AIMError('No expiration number to setTransaction(): {0}'.format(expiration))
        if total is None or str(total).strip() == '':
            raise AIM.AIMError('No total amount passed to setTransaction(): {0}'.format(total))
        self.setParameter('x_card_num', creditcard)
        self.setParameter('x_exp_date', expiration)
        self.setParameter('x_amount', total)
        if cvv is not None:
            self.setParameter('x_card_code', cvv)
        if tax is not None:
            self.setParameter('x_tax', tax)
        if invoice is not None:
            self.setParameter('x_invoice_num', invoice)

    def setTransactionType(self, transtype=None):
        """Override the default AUTH_CAPTURE transaction type."""
        types = ['AUTH_CAPTURE', 'AUTH_ONLY', 'PRIOR_AUTH_CAPTURE',
                 'CREDIT', 'CAPTURE_ONLY', 'VOID']
        # Bug fix: a None transtype used to die with AttributeError on
        # .upper(); report it through AIMError like every other bad input.
        if transtype is None or transtype.upper() not in types:
            raise AIM.AIMError('Incorrect Transaction Type passed to setTransactionType(): {0}'.format(transtype))
        self.setParameter('x_type', transtype.upper())

    def setProxy(self, proxy=None):
        """Route the gateway request through an HTTP proxy."""
        # Same None-before-str() ordering fix as in __init__.
        if proxy is None or str(proxy).strip() == '':
            raise AIM.AIMError('No proxy passed to setProxy()')
        self.proxy = {'http': str(proxy).strip()}

    def setParameter(self, key=None, value=None):
        """Store a single non-blank AIM request parameter."""
        if key is not None and value is not None and str(key).strip() != '' and str(value).strip() != '':
            self.parameters[key] = str(value).strip()
        else:
            raise AIM.AIMError('Incorrect parameters passed to setParameter(): {0}:{1}'.format(key, value))

    def isApproved(self):
        return self.success

    def isDeclined(self):
        return self.declined

    def isError(self):
        return self.error

    def getResultResponseFull(self):
        """Translate the numeric response code (results[0]) to text."""
        # AIM response codes: 1=Approved 2=Declined 3=Error 4=Held for
        # Review.  Bug fix: the original list stopped at index 3 and
        # crashed with IndexError on held-for-review transactions; code 4
        # now surfaces through the normal AIMError path in process().
        responses = ['', 'Approved', 'Declined', 'Error', 'Held for Review']
        return responses[int(self.results[0])]
def process(creditcard, expiration, total, cvv=None, tax=None, invoice=None,
            login='cnpdev4289', transkey='SR2P8g4jdEn7vFLQ', testmode=True):
    """One-shot convenience wrapper around AIM.

    Builds a gateway client, submits the transaction and returns True when
    it is approved; any AIMError (bad input or gateway failure) yields
    False.  Slashes in the expiration date ('12/2012') are stripped.
    """
    gateway = AIM(login, transkey, testmode)
    normalized_expiration = expiration.replace('/', '')
    gateway.setTransaction(
        creditcard, normalized_expiration, total, cvv, tax, invoice)
    try:
        gateway.process()
    except AIM.AIMError:
        return False
    return gateway.isApproved()
def test():
    """Manual smoke test: run a $1.00 AUTH_CAPTURE against the
    authorize.net sandbox gateway and print the outcome.

    NOTE(review): Python 2 only (print statements and the
    ``except Exc, e`` syntax below) and requires network access.
    """
    import socket
    import sys  # NOTE(review): unused within this function
    from time import time
    # Fixed demo card data used for the sandbox request.
    creditcard = '4427802641004797'
    expiration = '122012'
    total = '1.00'
    cvv = '123'
    tax = '0.00'
    invoice = str(time())[4:10]  # get a quasi-random invoice number from the clock
    try:
        # Sandbox credentials; same defaults as the module-level process().
        payment = AIM('cnpdev4289', 'SR2P8g4jdEn7vFLQ', True)
        payment.setTransaction(
            creditcard, expiration, total, cvv, tax, invoice)
        payment.setParameter(
            'x_duplicate_window', 180)  # three-minute duplicate window
        payment.setParameter('x_cust_id', '1324')  # customer ID
        payment.setParameter('x_first_name', 'John')
        payment.setParameter('x_last_name', 'Conde')
        payment.setParameter('x_company', 'Test Company')
        payment.setParameter('x_address', '1234 Main Street')
        payment.setParameter('x_city', 'Townsville')
        payment.setParameter('x_state', 'NJ')
        payment.setParameter('x_zip', '12345')
        payment.setParameter('x_country', 'US')
        payment.setParameter('x_phone', '800-555-1234')
        payment.setParameter('x_description', 'Test Transaction')
        payment.setParameter(
            'x_customer_ip', socket.gethostbyname(socket.gethostname()))
        payment.setParameter('x_email', 'john@example.com')
        payment.setParameter('x_email_customer', False)
        payment.process()
        if payment.isApproved():
            print 'Response Code: ', payment.response.ResponseCode
            print 'Response Text: ', payment.response.ResponseText
            print 'Response: ', payment.getResultResponseFull()
            print 'Transaction ID: ', payment.response.TransactionID
            print 'CVV Result: ', payment.response.CVVResponse
            print 'Approval Code: ', payment.response.AuthCode
            print 'AVS Result: ', payment.response.AVSResponse
        elif payment.isDeclined():
            print 'Your credit card was declined by your bank'
        elif payment.isError():
            raise AIM.AIMError('An uncaught error occurred')
    except AIM.AIMError, e:
        print "Exception thrown:", e
        print 'An error occured'
    # Always show the final outcome flags.
    print 'approved', payment.isApproved()
    print 'declined', payment.isDeclined()
    print 'error', payment.isError()
# Bug fix: dataset-export residue ("| unknown | ...") was fused onto the
# final line, breaking the syntax of the entry-point guard.
if __name__ == '__main__':
    test()
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
unescapeHTML,
)
class WistiaIE(InfoExtractor):
    """youtube-dl extractor for Wistia-hosted videos.

    Accepts fast.wistia.net iframe embed URLs and the internal
    ``wistia:<id>`` scheme, and builds formats/thumbnails from Wistia's
    JSON media API.

    Bug fix: dataset-export residue fused onto the final line of
    _real_extract broke the class's syntax; no behavior changed.
    """

    _VALID_URL = r'(?:wistia:|https?://(?:fast\.)?wistia\.net/embed/iframe/)(?P<id>[a-z0-9]+)'
    _API_URL = 'http://fast.wistia.com/embed/medias/%s.json'
    _IFRAME_URL = 'http://fast.wistia.net/embed/iframe/%s'

    _TESTS = [{
        'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
        'md5': 'cafeb56ec0c53c18c97405eecb3133df',
        'info_dict': {
            'id': 'sh7fpupwlt',
            'ext': 'mov',
            'title': 'Being Resourceful',
            'description': 'a Clients From Hell Video Series video from worldwidewebhosting',
            'upload_date': '20131204',
            'timestamp': 1386185018,
            'duration': 117,
        },
    }, {
        'url': 'wistia:sh7fpupwlt',
        'only_matching': True,
    }, {
        # with hls video
        'url': 'wistia:807fafadvk',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_url(webpage):
        """Return an embed URL (or ``wistia:<id>``) found in *webpage*,
        or None when nothing matches.

        Tries, in order: iframe/meta embeds, inline JS embeds
        (``Wistia.embed``/``wistia_<id>`` element ids), and async embed
        divs loaded via E-v1.js.
        """
        match = re.search(
            r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
        if match:
            return unescapeHTML(match.group('url'))

        match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
        if match:
            return 'wistia:%s' % match.group('id')

        match = re.search(
            r'''(?sx)
                <script[^>]+src=(["'])(?:https?:)?//fast\.wistia\.com/assets/external/E-v1\.js\1[^>]*>.*?
                <div[^>]+class=(["']).*?\bwistia_async_(?P<id>[a-z0-9]+)\b.*?\2
            ''', webpage)
        if match:
            return 'wistia:%s' % match.group('id')

    def _real_extract(self, url):
        video_id = self._match_id(url)

        data_json = self._download_json(
            self._API_URL % video_id, video_id,
            # Some videos require this.
            headers={
                'Referer': url if url.startswith('http') else self._IFRAME_URL % video_id,
            })

        if data_json.get('error'):
            raise ExtractorError(
                'Error while getting the playlist', expected=True)

        data = data_json['media']
        title = data['name']

        formats = []
        thumbnails = []
        for a in data['assets']:
            aurl = a.get('url')
            if not aurl:
                continue
            astatus = a.get('status')
            atype = a.get('type')
            # Skip assets that are not ready (status 2 appears to mean
            # "ready" -- TODO confirm) and non-media asset types.
            if (astatus is not None and astatus != 2) or atype in ('preview', 'storyboard'):
                continue
            elif atype in ('still', 'still_image'):
                thumbnails.append({
                    'url': aurl,
                    'width': int_or_none(a.get('width')),
                    'height': int_or_none(a.get('height')),
                })
            else:
                aext = a.get('ext')
                is_m3u8 = a.get('container') == 'm3u8' or aext == 'm3u8'
                formats.append({
                    'format_id': atype,
                    'url': aurl,
                    'tbr': int_or_none(a.get('bitrate')),
                    'vbr': int_or_none(a.get('opt_vbitrate')),
                    'width': int_or_none(a.get('width')),
                    'height': int_or_none(a.get('height')),
                    'filesize': int_or_none(a.get('size')),
                    'vcodec': a.get('codec'),
                    'container': a.get('container'),
                    # HLS assets are MP4 segments delivered via m3u8.
                    'ext': 'mp4' if is_m3u8 else aext,
                    'protocol': 'm3u8' if is_m3u8 else None,
                    # Prefer the original upload over derived renditions.
                    'preference': 1 if atype == 'original' else None,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': data.get('seoDescription'),
            'formats': formats,
            'thumbnails': thumbnails,
            'duration': float_or_none(data.get('duration')),
            'timestamp': int_or_none(data.get('createdAt')),
        }
# -*- test-case-name: twisted.conch.test.test_keys -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Data used by test_keys as well as others.
"""
from __future__ import absolute_import, division
from base64 import decodestring
from twisted.python.compat import long
# RSA key parameters (PKCS#1 naming): n modulus, e public exponent,
# d private exponent, p/q primes; 'u' is presumably the CRT inverse
# coefficient as consumed by the key implementation -- verify against
# test_keys.  Do NOT alter these digits: tests depend on the exact key.
RSAData = {
    'n': long('106248668575524741116943830949539894737212779118943280948138'
              '20729711061576321820845393835692814935201176341295575504152775'
              '16685881326038852354459895734875625093273594925884531272867425'
              '864910490065695876046999646807138717162833156501'),
    'e': long(35),
    'd': long('667848773903298372735075508825679338348194611604786337388297'
              '30301040958479737159599618395783408164121679859572188879144827'
              '13602371850869127033494910375212470664166001439410214474266799'
              '85974425203903884190893469297150446322896587555'),
    'q': long('3395694744258061291019136154000709371890447462086362702627'
              '9704149412726577280741108645721676968699696898960891593323'),
    'p': long('3128922844292337321766351031842562691837301298995834258844'
              '4720539204069737532863831050930719431498338835415515173887'),
    'u': long('2777403202132551568802514199893235993376771442611051821485'
              '0278129927603609294283482712900532542110958095343012272938')
}
# DSA key parameters: p/q/g domain parameters, x private key, y public
# key.  Do NOT alter these digits: tests depend on the exact key.
DSAData = {
    'g': long("10253261326864117157640690761723586967382334319435778695"
              "29171533815411392477819921538350732400350395446211982054"
              "96512489289702949127531056893725702005035043292195216541"
              "11525058911428414042792836395195432445511200566318251789"
              "10575695836669396181746841141924498545494149998282951407"
              "18645344764026044855941864175"),
    'p': long("10292031726231756443208850082191198787792966516790381991"
              "77502076899763751166291092085666022362525614129374702633"
              "26262930887668422949051881895212412718444016917144560705"
              "45675251775747156453237145919794089496168502517202869160"
              "78674893099371444940800865897607102159386345313384716752"
              "18590012064772045092956919481"),
    'q': long(1393384845225358996250882900535419012502712821577),
    'x': long(1220877188542930584999385210465204342686893855021),
    'y': long("14604423062661947579790240720337570315008549983452208015"
              "39426429789435409684914513123700756086453120500041882809"
              "10283610277194188071619191739512379408443695946763554493"
              "86398594314468629823767964702559709430618263927529765769"
              "10270265745700231533660131769648708944711006508965764877"
              "684264272082256183140297951")
}
publicRSA_openssh = (b"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBE"
b"vLi8DVPrJ3/c9k2I/Az64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYL"
b"h5KmRpslkYHRivcJSkbh/C+BR3utDS555mV comment")
privateRSA_openssh = b"""-----BEGIN RSA PRIVATE KEY-----
MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW
4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw
vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb
Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1
xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8
PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2
gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu
DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML
pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP
EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg==
-----END RSA PRIVATE KEY-----"""
# Some versions of OpenSSH generate these (slightly different keys)
privateRSA_openssh_alternate = b"""-----BEGIN RSA PRIVATE KEY-----
MIIBzjCCAcgCAQACYQCvMnHw5g6cmbN/i18ES8uLwNU+snf9z2TYj8DPrh/GMd/2
KbJEluLG1CGUf2V82NQjH7guaskflA1GwWmitwcMo5PBNNguHkqZGmyWRgdGK9wl
KRuH8L4FHe60NLnnmZUCASMCYG4ftVWX6+1n7SuZbuzB7ahNUtbz1mUGBN/lVJ/M
iQA8m2eH7GWgq81vZZCKl5BNxiGPqI3YWYZDtYGxtNdfLCIKYcElikcStJr4ehEc
SqiLdcSRCTu+BMpF2VeKDSfLIwIxANyfa9mYIVYRjelfA50K05NuE3dBPIVPAHD9
BVT/vD0Jv4P2l39kEJEE/qJnR1RCawIxAMtKS9BAR+hFUvfHrwwgbUMNtjmU+dql
5QMGdoMk64ihVaKo3hI7d0mSiqlx0gKT/wIwS6Rffc3CSWUatmm4GJX/Zb9XIZK1
qgx1Lg2bbZmCXhH4hQQWr1WCBdXTpWU9BvIzAjAXO7DkmaHRZwIq8j/kIPaLUgYy
d20DC6Uk6su3N2tgEnAv2MjsJA2iAh55w918ozMCMQC0c5dLUBCjF7OoR/E6FHZS
0TgqzxIUNMGoVEwpNYCgOLjw+kzEwoWr24eCutzr2yowAA==
------END RSA PRIVATE KEY------"""
# Encrypted with the passphrase 'encrypted'
privateRSA_openssh_encrypted = b"""-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,FFFFFFFFFFFFFFFF
30qUR7DYY/rpVJu159paRM1mUqt/IMibfEMTKWSjNhCVD21hskftZCJROw/WgIFt
ncusHpJMkjgwEpho0KyKilcC7zxjpunTex24Meb5pCdXCrYft8AyUkRdq3dugMqT
4nuWuWxziluBhKQ2M9tPGcEOeulU4vVjceZt2pZhZQVBf08o3XUv5/7RYd24M9md
WIo+5zdj2YQkI6xMFTP954O/X32ME1KQt98wgNEy6mxhItbvf00mH3woALwEKP3v
PSMxxtx3VKeDKd9YTOm1giKkXZUf91vZWs0378tUBrU4U5qJxgryTjvvVKOtofj6
4qQy6+r6M6wtwVlXBgeRm2gBPvL3nv6MsROp3E6ztBd/e7A8fSec+UTq3ko/EbGP
0QG+IG5tg8FsdITxQ9WAIITZL3Rc6hA5Ymx1VNhySp3iSiso8Jof27lku4pyuvRV
ko/B3N2H7LnQrGV0GyrjeYocW/qZh/PCsY48JBFhlNQexn2mn44AJW3y5xgbhvKA
3mrmMD1hD17ZvZxi4fPHjbuAyM1vFqhQx63eT9ijbwJ91svKJl5O5MIv41mCRonm
hxvOXw8S0mjSasyofptzzQCtXxFLQigXbpQBltII+Ys=
-----END RSA PRIVATE KEY-----"""
# Encrypted with the passphrase 'testxp'. NB: this key was generated by
# OpenSSH, so it doesn't use the same key data as the other keys here.
privateRSA_openssh_encrypted_aes = b"""-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,0673309A6ACCAB4B77DEE1C1E536AC26
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----"""
publicRSA_lsh = (b"{KDEwOnB1YmxpYy1rZXkoMTQ6cnNhLXBrY3MxLXNoYTEoMTpuOTc6AK8yc"
b"fDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW4sbUIZR/ZXzY1CMfuC5qyR+UDUbB"
b"aaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fwvgUd7rQ0ueeZlSkoMTplMTojKSkp}")
privateRSA_lsh = (b"(11:private-key(9:rsa-pkcs1(1:n97:\x00\xaf2q\xf0\xe6\x0e"
b"\x9c\x99\xb3\x7f\x8b_\x04K\xcb\x8b\xc0\xd5>\xb2w\xfd\xcfd\xd8\x8f\xc0\xcf"
b"\xae\x1f\xc61\xdf\xf6)\xb2D\x96\xe2\xc6\xd4!\x94\x7fe|\xd8\xd4#\x1f\xb8.j"
b"\xc9\x1f\x94\rF\xc1i\xa2\xb7\x07\x0c\xa3\x93\xc14\xd8.\x1eJ\x99\x1al\x96F"
b"\x07F+\xdc%)\x1b\x87\xf0\xbe\x05\x1d\xee\xb44\xb9\xe7\x99\x95)(1:e1:#)(1:d9"
b"6:n\x1f\xb5U\x97\xeb\xedg\xed+\x99n\xec\xc1\xed\xa8MR\xd6\xf3\xd6e\x06\x04"
b"\xdf\xe5T\x9f\xcc\x89\x00<\x9bg\x87\xece\xa0\xab\xcdoe\x90\x8a\x97\x90M\xc6"
b'!\x8f\xa8\x8d\xd8Y\x86C\xb5\x81\xb1\xb4\xd7_,"\na\xc1%\x8aG\x12\xb4\x9a\xf8'
b"z\x11\x1cJ\xa8\x8bu\xc4\x91\t;\xbe\x04\xcaE\xd9W\x8a\r\'\xcb#)(1:p49:\x00"
b"\xdc\x9fk\xd9\x98!V\x11\x8d\xe9_\x03\x9d\n\xd3\x93n\x13wA<\x85O\x00p\xfd"
b"\x05T\xff\xbc=\t\xbf\x83\xf6\x97\x7fd\x10\x91\x04\xfe\xa2gGTBk)(1:q49:\x00"
b"\xcbJK\xd0@G\xe8ER\xf7\xc7\xaf\x0c mC\r\xb69\x94\xf9\xda\xa5\xe5\x03\x06v"
b"\x83$\xeb\x88\xa1U\xa2\xa8\xde\x12;wI\x92\x8a\xa9q\xd2\x02\x93\xff)(1:a48:K"
b"\xa4_}\xcd\xc2Ie\x1a\xb6i\xb8\x18\x95\xffe\xbfW!\x92\xb5\xaa\x0cu.\r\x9bm"
b"\x99\x82^\x11\xf8\x85\x04\x16\xafU\x82\x05\xd5\xd3\xa5e=\x06\xf23)(1:b48:"
b"\x17;\xb0\xe4\x99\xa1\xd1g\x02*\xf2?\xe4 \xf6\x8bR\x062wm\x03\x0b\xa5$\xea"
b"\xcb\xb77k`\x12p/\xd8\xc8\xec$\r\xa2\x02\x1ey\xc3\xdd|\xa33)(1:c49:\x00\xb4"
b"s\x97KP\x10\xa3\x17\xb3\xa8G\xf1:\x14vR\xd18*\xcf\x12\x144\xc1\xa8TL)5\x80"
b"\xa08\xb8\xf0\xfaL\xc4\xc2\x85\xab\xdb\x87\x82\xba\xdc\xeb\xdb*)))")
privateRSA_agentv3 = (b"\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01#\x00\x00\x00`"
b"n\x1f\xb5U\x97\xeb\xedg\xed+\x99n\xec\xc1\xed\xa8MR\xd6\xf3\xd6e\x06\x04"
b"\xdf\xe5T\x9f\xcc\x89\x00<\x9bg\x87\xece\xa0\xab\xcdoe\x90\x8a\x97\x90M\xc6"
b'!\x8f\xa8\x8d\xd8Y\x86C\xb5\x81\xb1\xb4\xd7_,"\na\xc1%\x8aG\x12\xb4\x9a\xf8'
b"z\x11\x1cJ\xa8\x8bu\xc4\x91\t;\xbe\x04\xcaE\xd9W\x8a\r\'\xcb#\x00\x00\x00a"
b"\x00\xaf2q\xf0\xe6\x0e\x9c\x99\xb3\x7f\x8b_\x04K\xcb\x8b\xc0\xd5>\xb2w\xfd"
b"\xcfd\xd8\x8f\xc0\xcf\xae\x1f\xc61\xdf\xf6)\xb2D\x96\xe2\xc6\xd4!\x94\x7fe|"
b"\xd8\xd4#\x1f\xb8.j\xc9\x1f\x94\rF\xc1i\xa2\xb7\x07\x0c\xa3\x93\xc14\xd8."
b"\x1eJ\x99\x1al\x96F\x07F+\xdc%)\x1b\x87\xf0\xbe\x05\x1d\xee\xb44\xb9\xe7"
b"\x99\x95\x00\x00\x001\x00\xb4s\x97KP\x10\xa3\x17\xb3\xa8G\xf1:\x14vR\xd18*"
b"\xcf\x12\x144\xc1\xa8TL)5\x80\xa08\xb8\xf0\xfaL\xc4\xc2\x85\xab\xdb\x87\x82"
b"\xba\xdc\xeb\xdb*\x00\x00\x001\x00\xcbJK\xd0@G\xe8ER\xf7\xc7\xaf\x0c mC\r"
b"\xb69\x94\xf9\xda\xa5\xe5\x03\x06v\x83$\xeb\x88\xa1U\xa2\xa8\xde\x12;wI\x92"
b"\x8a\xa9q\xd2\x02\x93\xff\x00\x00\x001\x00\xdc\x9fk\xd9\x98!V\x11\x8d\xe9_"
b"\x03\x9d\n\xd3\x93n\x13wA<\x85O\x00p\xfd\x05T\xff\xbc=\t\xbf\x83\xf6\x97"
b"\x7fd\x10\x91\x04\xfe\xa2gGTBk")
publicDSA_openssh = b"""\
ssh-dss AAAAB3NzaC1kc3MAAACBAJKQOsVERVDQIpANHH+JAAylo9\
LvFYmFFVMIuHFGlZpIL7sh3IMkqy+cssINM/lnHD3fmsAyLlUXZtt6PD9LgZRazsPOgptuH+Gu48G\
+yFuE8l0fVVUivos/MmYVJ66qT99htcZKatrTWZnpVW7gFABoqw+he2LZ0gkeU0+Sx9a5AAAAFQD0\
EYmTNaFJ8CS0+vFSF4nYcyEnSQAAAIEAkgLjxHJAE7qFWdTqf7EZngu7jAGmdB9k3YzMHe1ldMxEB\
7zNw5aOnxjhoYLtiHeoEcOk2XOyvnE+VfhIWwWAdOiKRTEZlmizkvhGbq0DCe2EPMXirjqWACI5nD\
ioQX1oEMonR8N3AEO5v9SfBqS2Q9R6OBr6lf04RvwpHZ0UGu8AAACAAhRpxGMIWEyaEh8YnjiazQT\
NEpklRZqeBGo1gotJggNmVaIQNIClGlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2G\
gdgMQWC7S6WFIXePGGXqNQDdWxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8= \
comment\
"""
privateDSA_openssh = b"""\
-----BEGIN DSA PRIVATE KEY-----
MIIBvAIBAAKBgQCSkDrFREVQ0CKQDRx/iQAMpaPS7xWJhRVTCLhxRpWaSC+7IdyD
JKsvnLLCDTP5Zxw935rAMi5VF2bbejw/S4GUWs7DzoKbbh/hruPBvshbhPJdH1VV
Ir6LPzJmFSeuqk/fYbXGSmra01mZ6VVu4BQAaKsPoXti2dIJHlNPksfWuQIVAPQR
iZM1oUnwJLT68VIXidhzISdJAoGBAJIC48RyQBO6hVnU6n+xGZ4Lu4wBpnQfZN2M
zB3tZXTMRAe8zcOWjp8Y4aGC7Yh3qBHDpNlzsr5xPlX4SFsFgHToikUxGZZos5L4
Rm6tAwnthDzF4q46lgAiOZw4qEF9aBDKJ0fDdwBDub/UnwaktkPUejga+pX9OEb8
KR2dFBrvAoGAAhRpxGMIWEyaEh8YnjiazQTNEpklRZqeBGo1gotJggNmVaIQNICl
GlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2GgdgMQWC7S6WFIXeP
GGXqNQDdWxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8CFQDV2gbL
czUdxCus0pfEP1bddaXRLQ==
-----END DSA PRIVATE KEY-----\
"""
publicDSA_lsh = decodestring(b"""\
e0tERXdPbkIxWW14cFl5MXJaWGtvTXpwa2MyRW9NVHB3TVRJNU9nQ1NrRHJGUkVWUTBDS1FEUngv
aVFBTXBhUFM3eFdKaFJWVENMaHhScFdhU0MrN0lkeURKS3N2bkxMQ0RUUDVaeHc5MzVyQU1pNVZG
MmJiZWp3L1M0R1VXczdEem9LYmJoL2hydVBCdnNoYmhQSmRIMVZWSXI2TFB6Sm1GU2V1cWsvZlli
WEdTbXJhMDFtWjZWVnU0QlFBYUtzUG9YdGkyZElKSGxOUGtzZld1U2tvTVRweE1qRTZBUFFSaVpN
MW9VbndKTFQ2OFZJWGlkaHpJU2RKS1NneE9tY3hNams2QUpJQzQ4UnlRQk82aFZuVTZuK3hHWjRM
dTR3QnBuUWZaTjJNekIzdFpYVE1SQWU4emNPV2pwOFk0YUdDN1loM3FCSERwTmx6c3I1eFBsWDRT
RnNGZ0hUb2lrVXhHWlpvczVMNFJtNnRBd250aER6RjRxNDZsZ0FpT1p3NHFFRjlhQkRLSjBmRGR3
QkR1Yi9Vbndha3RrUFVlamdhK3BYOU9FYjhLUjJkRkJydktTZ3hPbmt4TWpnNkFoUnB4R01JV0V5
YUVoOFluamlhelFUTkVwa2xSWnFlQkdvMWdvdEpnZ05tVmFJUU5JQ2xHbEx5Q2kzNTllZkVVdVFj
WjlTWHhNNTlQK2hlY2MvR1UvR0hha1c1WVdFNGRQMkdnZGdNUVdDN1M2V0ZJWGVQR0dYcU5RRGRX
eGxYOHVtaGVudlFxYTFQbktyRlJoRHJKdzhaN0dqZEh4ZmxzeENFbVhQb0xOOHBLU2s9fQ==
""")
privateDSA_lsh = decodestring(b"""\
KDExOnByaXZhdGUta2V5KDM6ZHNhKDE6cDEyOToAkpA6xURFUNAikA0cf4kADKWj0u8ViYUVUwi4
cUaVmkgvuyHcgySrL5yywg0z+WccPd+awDIuVRdm23o8P0uBlFrOw86Cm24f4a7jwb7IW4TyXR9V
VSK+iz8yZhUnrqpP32G1xkpq2tNZmelVbuAUAGirD6F7YtnSCR5TT5LH1rkpKDE6cTIxOgD0EYmT
NaFJ8CS0+vFSF4nYcyEnSSkoMTpnMTI5OgCSAuPEckATuoVZ1Op/sRmeC7uMAaZ0H2TdjMwd7WV0
zEQHvM3Dlo6fGOGhgu2Id6gRw6TZc7K+cT5V+EhbBYB06IpFMRmWaLOS+EZurQMJ7YQ8xeKuOpYA
IjmcOKhBfWgQyidHw3cAQ7m/1J8GpLZD1Ho4GvqV/ThG/CkdnRQa7ykoMTp5MTI4OgIUacRjCFhM
mhIfGJ44ms0EzRKZJUWangRqNYKLSYIDZlWiEDSApRpS8got+fXnxFLkHGfUl8TOfT/oXnHPxlPx
h2pFuWFhOHT9hoHYDEFgu0ulhSF3jxhl6jUA3VsZV/LpoXp70KmtT5yqxUYQ6ycPGexo3R8X5bMQ
hJlz6CzfKSgxOngyMToA1doGy3M1HcQrrNKXxD9W3XWl0S0pKSk=
""")
privateDSA_agentv3 = decodestring(b"""\
AAAAB3NzaC1kc3MAAACBAJKQOsVERVDQIpANHH+JAAylo9LvFYmFFVMIuHFGlZpIL7sh3IMkqy+c
ssINM/lnHD3fmsAyLlUXZtt6PD9LgZRazsPOgptuH+Gu48G+yFuE8l0fVVUivos/MmYVJ66qT99h
tcZKatrTWZnpVW7gFABoqw+he2LZ0gkeU0+Sx9a5AAAAFQD0EYmTNaFJ8CS0+vFSF4nYcyEnSQAA
AIEAkgLjxHJAE7qFWdTqf7EZngu7jAGmdB9k3YzMHe1ldMxEB7zNw5aOnxjhoYLtiHeoEcOk2XOy
vnE+VfhIWwWAdOiKRTEZlmizkvhGbq0DCe2EPMXirjqWACI5nDioQX1oEMonR8N3AEO5v9SfBqS2
Q9R6OBr6lf04RvwpHZ0UGu8AAACAAhRpxGMIWEyaEh8YnjiazQTNEpklRZqeBGo1gotJggNmVaIQ
NIClGlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2GgdgMQWC7S6WFIXePGGXqNQDd
WxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8AAAAVANXaBstzNR3EK6zSl8Q/Vt11
pdEt
""")
# Bug fix: dataset-export residue ("| unknown | ...") was fused onto the
# final line of this list, breaking the module's syntax.  Entries are kept
# in the original order.
__all__ = ['DSAData', 'RSAData', 'privateDSA_agentv3', 'privateDSA_lsh',
           'privateDSA_openssh', 'privateRSA_agentv3', 'privateRSA_lsh',
           'privateRSA_openssh', 'publicDSA_lsh', 'publicDSA_openssh',
           'publicRSA_lsh', 'publicRSA_openssh', 'privateRSA_openssh_alternate']
#!/usr/bin/python
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
import subprocess
import os
import socket
import sys
from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
ERROR,
INFO
)
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
# Presumably the Cloud Archive's GPG signing key id (not referenced in
# this chunk) -- verify against the rest of the module.
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
# sources.list template for the -proposed pocket; %s is the Ubuntu codename.
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
                   'restricted main multiverse universe')

# Ubuntu series -> OpenStack release shipped in that series' archive
# (used by get_os_codename_install_source for 'distro' sources).
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
    ('oneiric', 'diablo'),
    ('precise', 'essex'),
    ('quantal', 'folsom'),
    ('raring', 'grizzly'),
    ('saucy', 'havana'),
    ('trusty', 'icehouse'),
    ('utopic', 'juno'),
])

# OpenStack YYYY.N version number -> release codename.
OPENSTACK_CODENAMES = OrderedDict([
    ('2011.2', 'diablo'),
    ('2012.1', 'essex'),
    ('2012.2', 'folsom'),
    ('2013.1', 'grizzly'),
    ('2013.2', 'havana'),
    ('2014.1', 'icehouse'),
    ('2014.2', 'juno'),
])

# The ugly duckling: swift does not follow the YYYY.N scheme, so its
# package-version prefixes get their own map (matched on 5 or 6 chars by
# get_os_codename_package).
SWIFT_CODENAMES = OrderedDict([
    ('1.4.3', 'diablo'),
    ('1.4.8', 'essex'),
    ('1.7.4', 'folsom'),
    ('1.8.0', 'grizzly'),
    ('1.7.7', 'grizzly'),
    ('1.7.6', 'grizzly'),
    ('1.10.0', 'havana'),
    ('1.9.1', 'havana'),
    ('1.9.0', 'havana'),
    ('1.13.1', 'icehouse'),
    ('1.13.0', 'icehouse'),
    ('1.12.0', 'icehouse'),
    ('1.11.0', 'icehouse'),
])

# Not referenced in this chunk -- presumably the default size for loopback
# storage devices; verify against ensure_loopback_device callers.
DEFAULT_LOOPBACK_SIZE = '5G'
def error_out(msg):
    """Log *msg* as a fatal error through juju and terminate the hook
    with exit status 1."""
    fatal = "FATAL ERROR: %s" % msg
    juju_log(fatal, level='ERROR')
    sys.exit(1)
def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.'''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
    if src is None:
        return ''
    # Plain distro sources map straight from the Ubuntu series.
    if src in ('distro', 'distro-proposed'):
        try:
            return UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:
            error_out('Could not derive openstack release for '
                      'this Ubuntu release: %s' % ubuntu_rel)
        return ''
    # cloud:<series>-<release>[/pocket] encodes the codename directly.
    if src.startswith('cloud:'):
        pocket = src.split(':')[1]
        return pocket.split('%s-' % ubuntu_rel)[1].split('/')[0]
    # Best guess match based on deb string provided
    if src.startswith(('deb', 'ppa')):
        for codename in OPENSTACK_CODENAMES.itervalues():
            if codename in src:
                return codename
def get_os_version_install_source(src):
    """Map an installation source to its OpenStack version number."""
    return get_os_version_codename(get_os_codename_install_source(src))
def get_os_codename_version(vers):
    '''Determine OpenStack codename from version number.'''
    if vers in OPENSTACK_CODENAMES:
        return OPENSTACK_CODENAMES[vers]
    # Unknown version is fatal: error_out exits the process.
    error_out('Could not determine OpenStack codename for version %s' % vers)
def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.'''
    for version, name in OPENSTACK_CODENAMES.iteritems():
        if name == codename:
            return version
    # No reverse mapping found -- fatal; error_out exits the process.
    error_out('Could not derive OpenStack version for '
              'codename: %s' % codename)
def get_os_codename_package(package, fatal=True):
    '''Derive OpenStack release codename from an installed package.

    Returns None instead of exiting when fatal=False and the package is
    unknown or not installed.
    '''
    import apt_pkg as apt
    apt.init()

    # Tell apt to build an in-memory cache to prevent race conditions (if
    # another process is already building the cache).
    apt.config.set("Dir::Cache::pkgcache", "")

    cache = apt.Cache()

    try:
        pkg = cache[package]
    except KeyError:
        # Bug fix: this was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; an unknown package raises KeyError.
        if not fatal:
            return None
        # the package is unknown to the current apt cache.
        e = 'Could not determine version of package with no installation '\
            'candidate: %s' % package
        error_out(e)

    if not pkg.current_ver:
        if not fatal:
            return None
        # package is known, but no version is currently installed.
        e = 'Could not determine version of uninstalled package: %s' % package
        error_out(e)

    vers = apt.upstream_version(pkg.current_ver.ver_str)

    try:
        if 'swift' in pkg.name:
            # Swift does not follow YYYY.N versioning; match a 5- then
            # 6-character prefix against SWIFT_CODENAMES.
            swift_vers = vers[:5]
            if swift_vers not in SWIFT_CODENAMES:
                # Deal with 1.10.0 upward
                swift_vers = vers[:6]
            return SWIFT_CODENAMES[swift_vers]
        else:
            vers = vers[:6]
            return OPENSTACK_CODENAMES[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)
def get_os_version_package(pkg, fatal=True):
    '''Derive OpenStack version number from an installed package.

    Returns None when the codename cannot be determined (fatal=False).
    '''
    codename = get_os_codename_package(pkg, fatal=fatal)
    if not codename:
        return None
    # Swift versions live in their own table; everything else follows the
    # YYYY.N scheme.
    vers_map = SWIFT_CODENAMES if 'swift' in pkg else OPENSTACK_CODENAMES
    for version, name in vers_map.iteritems():
        if name == codename:
            return version
# Module-level cache for os_release().
os_rel = None


def os_release(package, base='essex'):
    '''
    Returns OpenStack release codename from a cached global.
    If the codename can not be determined from either an installed package or
    the installation source, the earliest release supported by the charm should
    be returned.
    '''
    global os_rel
    if not os_rel:
        detected = get_os_codename_package(package, fatal=False)
        if not detected:
            detected = get_os_codename_install_source(
                config('openstack-origin'))
        os_rel = detected or base
    return os_rel
def import_key(keyid):
    """Fetch *keyid* from the Ubuntu keyserver into apt's trusted keyring;
    a failed fetch is fatal (error_out exits)."""
    keyserver = 'hkp://keyserver.ubuntu.com:80'
    cmd = 'apt-key adv --keyserver %s --recv-keys %s' % (keyserver, keyid)
    try:
        subprocess.check_call(cmd.split(' '))
    except subprocess.CalledProcessError:
        error_out("Error importing repo key %s" % keyid)
def configure_installation_source(rel):
    '''Configure apt installation source.

    ``rel`` may be one of:
      - 'distro':          stock Ubuntu archive (no-op here)
      - 'distro-proposed': enable the proposed pocket for this Ubuntu release
      - 'ppa:...':         add a PPA via add-apt-repository
      - 'deb ...' or 'deb ...|KEYID': raw deb line, optionally importing a
                           signing key from the keyserver first
      - 'cloud:<ubuntu>-<openstack>': an Ubuntu Cloud Archive pocket
    Any other value aborts via error_out().
    '''
    if rel == 'distro':
        return
    elif rel == 'distro-proposed':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(DISTRO_PROPOSED % ubuntu_rel)
    elif rel[:4] == "ppa:":
        src = rel
        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        # 'deb line|KEYID' carries a signing key to import first.
        l = len(rel.split('|'))
        if l == 2:
            src, key = rel.split('|')
            juju_log("Importing PPA key from keyserver for %s" % src)
            import_key(key)
        elif l == 1:
            src = rel
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        # Cloud Archive: 'cloud:<ubuntu_series>-<openstack_release>'.
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]
        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)
        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            os_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
            cmd = 'add-apt-repository -y %s' % ppa
            subprocess.check_call(cmd.split(' '))
            return
        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly',
            'havana': 'precise-updates/havana',
            'havana/updates': 'precise-updates/havana',
            'havana/proposed': 'precise-proposed/havana',
            'icehouse': 'precise-updates/icehouse',
            'icehouse/updates': 'precise-updates/icehouse',
            'icehouse/proposed': 'precise-proposed/icehouse',
            'juno': 'trusty-updates/juno',
            'juno/updates': 'trusty-updates/juno',
            'juno/proposed': 'trusty-proposed/juno',
        }
        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)
        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        # The cloud archive is signed by the key shipped in this package.
        apt_install('ubuntu-cloud-keyring', fatal=True)
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts run
    outside the juju hook environment can source this scriptrc to obtain
    updated config information necessary to perform health checks or
    service changes.

    :param script_path: str: Path of the rc file relative to the charm dir.
    :param env_vars: Variables to export; the 'script_path' key is reserved
                     and never written to the file.
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    # Text mode ('w', not 'wb'): str lines are written, which would break
    # under Python 3 in binary mode.
    with open(juju_rc_path, 'w') as rc_script:
        rc_script.write("#!/bin/bash\n")
        # Plain loop instead of a side-effect list comprehension; .items()
        # instead of the Python-2-only .iteritems().
        for u, p in env_vars.items():
            if u != "script_path":
                rc_script.write('export %s=%s\n' % (u, p))
def openstack_upgrade_available(package):
    """
    Determines if an OpenStack upgrade is available from installation
    source, based on version of installed package.

    :param package: str: Name of installed package.
    :returns: bool: : Returns True if configured installation source offers
                      a newer version of package.
    """
    import apt_pkg as apt
    src = config('openstack-origin')
    cur_vers = get_os_version_package(package)
    available_vers = get_os_version_install_source(src)
    apt.init()
    # apt_pkg.version_compare() follows strcmp() semantics: any positive
    # value means 'available_vers' is newer.  Testing '== 1' would miss
    # other positive return values.
    return apt.version_compare(available_vers, cur_vers) > 0
def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    Accepted forms:
      - '/dev/xxx' or a bare device name ('xxx'): used as-is.
      - '/path/to/file' or '/path/to/file|SIZE': backing file for a loopback
        device created via ensure_loopback_device() (SIZE defaults to
        DEFAULT_LOOPBACK_SIZE).

    :param block_device: str: Full path of block device to ensure.
    :returns: str: Full path of ensured block device.
    '''
    _none = ['None', 'none', None]
    if (block_device in _none):
        error_out('prepare_storage(): Missing required input: '
                  'block_device=%s.' % block_device, level=ERROR)
    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        # Path to a backing file, optionally suffixed with '|SIZE'.
        _bd = block_device.split('|')
        if len(_bd) == 2:
            bdev, size = _bd
        else:
            bdev = block_device
            size = DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device
    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev,
                  level=ERROR)
    return bdev
def clean_storage(block_device):
    '''
    Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    # Unmount the device wherever it is currently mounted.
    for mp, d in mounts():
        if d == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (d, mp), level=INFO)
            umount(mp, persist=True)
    if is_lvm_physical_volume(block_device):
        # LVM PV: tear down the volume group and wipe the PV signature.
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        # Plain disk: just destroy the partition table.
        zap_disk(block_device)
def is_ip(address):
    """
    Returns True if address is a valid IP address.
    """
    # inet_aton() raises socket.error for anything that is not a valid
    # IPv4 address string.
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
def ns_query(address):
    """Perform a DNS lookup for address.

    :param address: a dns.name.Name (reverse/PTR lookup) or a string
                    (forward/A lookup); anything else returns None.
    :returns: str: first answer record, or None when there are no answers.

    NOTE(review): dns.resolver.query() raises (e.g. NXDOMAIN) for unknown
    names rather than returning an empty answer set -- confirm callers can
    tolerate that, since only the empty-answer case returns None here.
    """
    try:
        import dns.resolver
    except ImportError:
        # dnspython is installed on demand, then the import is retried.
        apt_install('python-dnspython')
        import dns.resolver
    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, basestring):
        rtype = 'A'
    else:
        return None
    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None
def get_host_ip(hostname):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.
    """
    if is_ip(hostname):
        return hostname
    # Not an IP: fall back to a forward DNS lookup (see ns_query()).
    return ns_query(hostname)
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.

    Returns None when a reverse lookup for an IP yields no answer.  With
    fqdn=False only the first label of the name is returned; otherwise a
    trailing '.' is stripped.
    """
    if not is_ip(address):
        # Already a hostname: pass it through to the fqdn handling below.
        result = address
    else:
        try:
            import dns.reversename
        except ImportError:
            apt_install('python-dnspython')
            import dns.reversename
        result = ns_query(dns.reversename.from_address(address))
        if not result:
            return None
    if not fqdn:
        return result.split('.')[0]
    # strip trailing .
    return result[:-1] if result.endswith('.') else result
from __future__ import absolute_import
#!/usr/bin/env python
import sys
import unittest
sys.path.append('xypath')
import xypath
import messytables
try:
import hamcrest
except ImportError:
hamcrest = None
import re
import tcore
class Test_Import_Missing(tcore.TMissing):
    """Import behaviour against the 'missing' fixture from tcore.TMissing."""
    def test_table_has_properties_at_all(self):
        # Merely accessing .sheet must not raise.
        self.table.sheet
class Test_Import(tcore.TCore):
    """Exercise the Table constructors (from_filename / from_file_object /
    from_messy) and their table_name / table_index selectors against the
    tcore fixtures."""
    def test_table_has_sheet_properties(self):
        # .sheet is backed by an xlrd sheet object.
        self.assertIn('xlrd', repr(self.table.sheet))
    #import
    def test_from_filename_with_table_name(self):
        """Can we specify only the filename and 'name' of the table?"""
        if hamcrest is None:
            raise unittest.SkipTest("Requires Hamcrest")
        table = xypath.Table.from_filename(
            self.wpp_filename,
            table_name='NOTES')
        self.assertEqual(32, len(table))
        table.filter(
            hamcrest.contains_string('(2) Including Zanzibar.')).assert_one()
    #import
    def test_from_filename_with_table_index(self):
        """Can we specify only the filename and index of the table?"""
        new_table = xypath.Table.from_filename(self.wpp_filename,
                                               table_index=5)
        self.assertEqual(1, len(new_table.filter('(2) Including Zanzibar.')))
    #import
    def test_from_file_object_table_index(self):
        """Selecting a table by index from an open file object."""
        with open(self.wpp_filename, 'rb') as f:
            extension = tcore.get_extension(self.wpp_filename)
            new_table = xypath.Table.from_file_object(
                f, extension, table_index=5)
            self.assertEqual(1, len(new_table.filter('(2) Including Zanzibar.')))
    #import
    def test_from_file_object_table_name(self):
        """Selecting a table by name from an open file object."""
        with open(self.wpp_filename, 'rb') as f:
            extension = tcore.get_extension(self.wpp_filename)
            new_table = xypath.Table.from_file_object(
                f, extension, table_name='NOTES')
            self.assertEqual(1, len(new_table.filter('(2) Including Zanzibar.')))
    #import
    def test_from_file_object_no_table_specifier(self):
        """Omitting both table_name and table_index is a TypeError."""
        with open(self.wpp_filename, 'rb') as f:
            extension = tcore.get_extension(self.wpp_filename)
            self.assertRaises(
                TypeError,
                lambda: xypath.Table.from_file_object(f, extension))
    #import
    def test_from_file_object_ambiguous_table_specifier(self):
        """Passing both table_name and table_index is a TypeError."""
        with open(self.wpp_filename, 'rb') as f:
            extension = tcore.get_extension(self.wpp_filename)
            self.assertRaises(
                TypeError,
                lambda: xypath.Table.from_file_object(
                    f, extension, table_name='NOTES', table_index=4))
    #import
    def test_from_messy(self):
        """Wrapping an existing messytables table."""
        new_table = xypath.Table.from_messy(self.messy.tables[0])
        self.assertEqual(265, len(new_table.filter('Estimates')))
from base64 import b64decode
from os.path import join
from bundlewrap.utils.testing import make_repo, run
def test_b64encode_fault(tmpdir):
    """password_for(...).b64encode() yields the expected deterministic value."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").b64encode())'", path=str(tmpdir))
    assert stdout == b"ZmFDVFQ3NmthZ3REdVpFNXdub2lEMUN4aEdLbWJnaVg=\n"
    assert stderr == b""
    assert rcode == 0
def test_encrypt(tmpdir):
    """encrypt() output round-trips through decrypt() with the default key."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\"))'", path=str(tmpdir))
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir))
    assert stdout == b"test\n"
    assert stderr == b""
    assert rcode == 0
def test_encrypt_different_key_autodetect(tmpdir):
    """decrypt() auto-detects the key when encrypt() used a non-default one."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\", key=\"generate\"))'", path=str(tmpdir))
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir))
    assert stdout == b"test\n"
    assert stderr == b""
    assert rcode == 0
def test_encrypt_file(tmpdir):
    """encrypt_file() output round-trips through decrypt_file()."""
    make_repo(tmpdir)
    source_file = join(str(tmpdir), "data", "source")
    with open(source_file, 'w') as f:
        f.write("ohai")
    stdout, stderr, rcode = run(
        "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
            source_file,
            "encrypted",
        ),
        path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format(
            "encrypted",
        ),
        path=str(tmpdir),
    )
    assert stdout == b"ohai\n"
    assert stderr == b""
    assert rcode == 0
def test_encrypt_file_different_key_autodetect(tmpdir):
    """decrypt_file() auto-detects a non-default encryption key."""
    make_repo(tmpdir)
    source_file = join(str(tmpdir), "data", "source")
    with open(source_file, 'w') as f:
        f.write("ohai")
    stdout, stderr, rcode = run(
        "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\", \"{}\")'".format(
            source_file,
            "encrypted",
            "generate",
        ),
        path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format(
            "encrypted",
        ),
        path=str(tmpdir),
    )
    assert stdout == b"ohai\n"
    assert stderr == b""
    assert rcode == 0
def test_encrypt_file_base64(tmpdir):
    """Non-UTF-8 file content round-trips via decrypt_file_as_base64()."""
    make_repo(tmpdir)
    source_file = join(str(tmpdir), "data", "source")
    with open(source_file, 'wb') as f:
        f.write("öhai".encode('latin-1'))
    stdout, stderr, rcode = run(
        "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
            source_file,
            "encrypted",
        ),
        path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\"))'".format(
            "encrypted",
        ),
        path=str(tmpdir),
    )
    assert b64decode(stdout.decode('utf-8')) == "öhai".encode('latin-1')
    assert stderr == b""
    assert rcode == 0
def test_format_password(tmpdir):
    """format_into() substitutes the derived password into the template."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir))
    assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n"
    assert stderr == b""
    assert rcode == 0
def test_human_password(tmpdir):
    """human_password_for() yields the expected deterministic phrase."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\"))'", path=str(tmpdir))
    assert stdout == b"Xaint-Heep-Pier-Tikl-76\n"
    assert stderr == b""
    assert rcode == 0
def test_human_password_digits(tmpdir):
    """digits= controls the length of the trailing number."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", digits=4))'", path=str(tmpdir))
    assert stdout == b"Xaint-Heep-Pier-Tikl-7608\n"
    assert stderr == b""
    assert rcode == 0
def test_human_password_per_word(tmpdir):
    """per_word= controls the letters per word segment."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", per_word=1))'", path=str(tmpdir))
    assert stdout == b"X-D-F-H-42\n"
    assert stderr == b""
    assert rcode == 0
def test_human_password_words(tmpdir):
    """words= controls the number of word segments."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", words=2))'", path=str(tmpdir))
    assert stdout == b"Xaint-Heep-13\n"
    assert stderr == b""
    assert rcode == 0
def test_random_bytes_as_base64(tmpdir):
    """random_bytes_as_base64_for() is deterministic for a given identifier."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\"))'", path=str(tmpdir))
    assert stdout == b"rt+Dgv0yA10DS3ux94mmtEg+isChTJvgkfklzmWkvyg=\n"
    assert stderr == b""
    assert rcode == 0
def test_random_bytes_as_base64_length(tmpdir):
    """length= controls the number of random bytes generated."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\", length=1))'", path=str(tmpdir))
    assert stdout == b"rg==\n"
    assert stderr == b""
    assert rcode == 0
def test_faults_equality_decrypt(tmpdir):
    """Fault equality for decrypt(): equal for same ciphertext, else unequal."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"foo\"))'", path=str(tmpdir))
    assert stderr == b""
    assert rcode == 0
    enc_foo = stdout.decode('utf-8').strip()
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.encrypt(\"bar\"))'", path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    enc_bar = stdout.decode('utf-8').strip()
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt(\"{0}\") == repo.vault.decrypt(\"{0}\"))'".format(
            enc_foo,
        ),
        path=str(tmpdir),
    )
    assert stdout == b"True\n"
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt(\"{}\") == repo.vault.decrypt(\"{}\"))'".format(
            enc_foo, enc_bar,
        ),
        path=str(tmpdir),
    )
    assert stdout == b"False\n"
    assert stderr == b""
    assert rcode == 0
def test_faults_equality_decrypt_file(tmpdir):
    """Fault equality for decrypt_file(): equal for same file, else unequal."""
    make_repo(tmpdir)
    source_file = join(str(tmpdir), "data", "source")
    with open(source_file, 'w') as f:
        f.write("foo")
    stdout, stderr, rcode = run(
        "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
            source_file,
            "enc_foo",
        ),
        path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    source_file = join(str(tmpdir), "data", "source")
    with open(source_file, 'w') as f:
        f.write("bar")
    stdout, stderr, rcode = run(
        "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
            source_file,
            "enc_bar",
        ),
        path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt_file(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format(
            "enc_foo", "enc_foo",
        ),
        path=str(tmpdir),
    )
    assert stdout == b"True\n"
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt_file(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format(
            "enc_foo", "enc_bar",
        ),
        path=str(tmpdir),
    )
    assert stdout == b"False\n"
    assert stderr == b""
    assert rcode == 0
def test_faults_equality_decrypt_file_as_base64(tmpdir):
    """Fault equality for decrypt_file_as_base64() mirrors decrypt_file()."""
    make_repo(tmpdir)
    source_file = join(str(tmpdir), "data", "source")
    with open(source_file, 'w') as f:
        f.write("foo")
    stdout, stderr, rcode = run(
        "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
            source_file,
            "enc_foo",
        ),
        path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    source_file = join(str(tmpdir), "data", "source")
    with open(source_file, 'w') as f:
        f.write("bar")
    stdout, stderr, rcode = run(
        "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
            source_file,
            "enc_bar",
        ),
        path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file_as_base64(\"{}\"))'".format(
            "enc_foo", "enc_foo",
        ),
        path=str(tmpdir),
    )
    assert stdout == b"True\n"
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file_as_base64(\"{}\"))'".format(
            "enc_foo", "enc_bar",
        ),
        path=str(tmpdir),
    )
    assert stdout == b"False\n"
    assert stderr == b""
    assert rcode == 0
def test_faults_equality_decrypt_file_mixed(tmpdir):
    """Faults from different decrypt methods never compare equal."""
    make_repo(tmpdir)
    source_file = join(str(tmpdir), "data", "source")
    with open(source_file, 'w') as f:
        f.write("foo")
    stdout, stderr, rcode = run(
        "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
            source_file,
            "enc_foo",
        ),
        path=str(tmpdir),
    )
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format(
            "enc_foo", "enc_foo",
        ),
        path=str(tmpdir),
    )
    assert stdout == b"False\n"
    assert stderr == b""
    assert rcode == 0
def test_faults_equality_human_password_for(tmpdir):
    """human_password_for() faults compare by identifier."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.human_password_for(\"a\") == repo.vault.human_password_for(\"a\"))'",
        path=str(tmpdir),
    )
    assert stdout == b"True\n"
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.human_password_for(\"a\") == repo.vault.human_password_for(\"b\"))'",
        path=str(tmpdir),
    )
    assert stdout == b"False\n"
    assert stderr == b""
    assert rcode == 0
def test_faults_equality_password_for(tmpdir):
    """password_for() faults compare by identifier."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.password_for(\"a\"))'",
        path=str(tmpdir),
    )
    assert stdout == b"True\n"
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.password_for(\"b\"))'",
        path=str(tmpdir),
    )
    assert stdout == b"False\n"
    assert stderr == b""
    assert rcode == 0
def test_faults_equality_password_for_mixed(tmpdir):
    """Faults from different generators never compare equal."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.human_password_for(\"a\"))'",
        path=str(tmpdir),
    )
    assert stdout == b"False\n"
    assert stderr == b""
    assert rcode == 0
def test_faults_equality_random_bytes_as_base64(tmpdir):
    """random_bytes_as_base64_for() faults compare by identifier."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"a\") == repo.vault.random_bytes_as_base64_for(\"a\"))'",
        path=str(tmpdir),
    )
    assert stdout == b"True\n"
    assert stderr == b""
    assert rcode == 0
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"a\") == repo.vault.random_bytes_as_base64_for(\"b\"))'",
        path=str(tmpdir),
    )
    assert stdout == b"False\n"
    assert stderr == b""
    assert rcode == 0
def test_cmd(tmpdir):
    """vault.cmd() returns stripped text output by default."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.cmd(\"echo hi\"))'",
        path=str(tmpdir),
    )
    assert stdout == b"hi\n"
    assert stderr == b""
    assert rcode == 0
def test_cmd_binary_nostrip(tmpdir):
    """as_text=False, strip=False yields the raw bytes incl. newline."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.cmd(\"echo hi\", as_text=False, strip=False))'",
        path=str(tmpdir),
    )
    assert stdout == b"b'hi\\n'\n"
    assert stderr == b""
    assert rcode == 0
def test_cmd_fail(tmpdir):
    """A failing command raises CalledProcessError inside bw debug."""
    make_repo(tmpdir)
    stdout, stderr, rcode = run(
        "bw debug -c 'print(repo.vault.cmd(\"false\"))'",
        path=str(tmpdir),
    )
    assert b"CalledProcessError" in stderr
    assert rcode == 1
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from deployd.common.caller import Caller
from deployd.download.download_helper import DownloadHelper
from deployd.common.status_code import Status
import logging
import os
log = logging.getLogger(__name__)
class LocalDownloadHelper(DownloadHelper):
    """Downloads a build tarball from a locally-served url via curl."""

    def _download_files(self, local_full_fn):
        """Fetch self._url into local_full_fn with curl.

        :param local_full_fn: str: destination path for the download.
        :returns: Status.SUCCEEDED or Status.FAILED.
        """
        download_cmd = ['curl', '-o', local_full_fn, '-ks', self._url]
        log.info('Running command: {}'.format(' '.join(download_cmd)))
        error_code = Status.SUCCEEDED
        output, error, status = Caller.call_and_log(download_cmd, cwd=os.getcwd())
        if output:
            log.info(output)
        if error:
            log.error(error)
        if status:
            error_code = Status.FAILED
        else:
            # Only claim success when curl exited cleanly; previously this
            # line was logged unconditionally, even after a failure.
            log.info('Finished downloading: {} to {}'.format(self._url, local_full_fn))
        return error_code

    def download(self, local_full_fn):
        """Download the tarball, logging and returning any failure status."""
        log.info("Start to download from local path {} to {}".format(
            self._url, local_full_fn))
        error = self._download_files(local_full_fn)
        if error != Status.SUCCEEDED:
            log.error('Failed to download the local tar ball for {}'.format(local_full_fn))
        return error
import sublime_plugin
import sublime
import re
import os
class ConcatCommand(sublime_plugin.TextCommand):
    """Sublime Text command that inlines CSS-style "@import url('...');"
    directives into a new '<name>.cat.<ext>' buffer, then saves and builds it.

    NOTE(review): uses the ST2-only edit API (begin_edit/end_edit) and
    Python 2 syntax (print statement, unicode()) -- will not run on ST3+.
    """
    def run(self, edit):
        view = self.view
        window = view.window()
        region = sublime.Region(0, view.size())
        # Split the current file path into directory, base name and extension.
        path = '/'.join(self.view.file_name().split(os.sep)[0:-1])
        full_name = self.view.file_name().split('/')[-1]
        extension = full_name.split('.')[-1]
        file_name = '.'.join(full_name.split('.')[0:-1])
        # Matches optionally comment-prefixed "@import url('...');" lines.
        test = '/{0,2}@import url\(\'(.+)\'\);'
        i = self.view.find(test, 0)
        if i:
            # Copy the whole buffer into a new '<name>.cat.<ext>' view.
            new_view = window.new_file()
            new_view.set_name(file_name + '.cat.' + extension)
            region = sublime.Region(0, view.size())
            content = view.substr(region)
            try:
                syntax_file = self.view.settings().get('syntax')
                new_view.set_syntax_file(syntax_file)
            except KeyError:
                print 'No syntax'
                pass
            edit = new_view.begin_edit('cat')
            new_view.insert(edit, 0, content)
            # Repeatedly replace the first remaining @import with the
            # referenced file's contents (handles nested imports too).
            while new_view.find(test, 0):
                i = new_view.find(test, 0)
                content = new_view.substr(i)
                m = re.search(test, content)
                if m:
                    included = m.group(1)
                    try:
                        f = open(''.join([path, os.sep, included]))
                        file_content = f.read()
                        encoded_content = unicode(file_content, 'utf-8')
                        new_view.replace(edit, i, encoded_content)
                        f.close()
                    except IOError:
                        print 'Cannot open', included
                        raise
            new_view.end_edit(edit)
            window.run_command('save')
            window.run_command('build')
"""
A command line interface for the cloco API.
"""
from setuptools import find_packages, setup
dependencies = ['click', 'requests', 'configparser']
setup(
    name='cloco-cli',
    version='0.1.7',
    # NOTE(review): 'BSD' here disagrees with the 'MIT License' classifier
    # below -- confirm which license actually applies before releasing.
    license='BSD',
    author='345 Systems',
    author_email='info@345.systems',
    description='A command line interface for the cloco API.',
    url='https://github.com/cloudconfig/cloco-cli',
    download_url='https://github.com/cloudconfig/cloco-cli/tarball/0.1.7',
    keywords=['cloco', 'cloudconfig', 'configuration',
              'configuration-as-a-service', 'devops'],
    # The module docstring doubles as the long description.
    long_description=__doc__,
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=dependencies,
    entry_points={
        'console_scripts': [
            'cloco = cloco_cli.cli:main',
        ],
    },
    classifiers=[
        # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        # 'Development Status :: 1 - Planning',
        # 'Development Status :: 2 - Pre-Alpha',
        # 'Development Status :: 3 - Alpha',
        'Development Status :: 4 - Beta',
        # 'Development Status :: 5 - Production/Stable',
        # 'Development Status :: 6 - Mature',
        # 'Development Status :: 7 - Inactive',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Build Tools',
    ]
)
import sys
pythonver = sys.version_info[0]
import client
import httpmw
def read_config(config_files, **predata):
    """Merge the public names produced by each config file into one dict.

    Later files override earlier ones; *predata* seeds the namespace each
    file is executed in (see _read_config_file()).
    """
    merged = {}
    for path in config_files:
        merged.update(_read_config_file(path, predata))
    return merged
def _read_config_file(_config_file, predata):
    """Execute one config file and yield its (name, value) pairs.

    The file is exec'd with *predata* as the local namespace, so its
    assignments land there; names starting with an underscore are skipped.
    The generator's own locals are also scanned, preserving the original
    behaviour.
    """
    _file = open(_config_file)
    # exec() only accepts source strings / code objects on Python 3 (the
    # Python 2 exec statement also took open file objects), so read the
    # source first to keep the py3 branch below actually reachable.
    exec(_file.read(), globals(), predata)
    _file.close()
    if pythonver >= 3:
        for _k, _v in list(predata.items()):
            if not _k.startswith('_'):
                yield _k, _v
        for _k, _v in list(locals().items()):
            if not _k.startswith('_'):
                yield _k, _v
    else:
        for _k, _v in predata.iteritems():
            if not _k.startswith('_'):
                yield _k, _v
        for _k, _v in locals().iteritems():
            if not _k.startswith('_'):
                yield _k, _v
class SiteList(object):
    """Mapping of site name -> per-site settings dict, created on demand."""

    def __init__(self):
        self.sites = {}

    def __getitem__(self, key):
        # Auto-vivify an empty settings dict on first access.
        return self.sites.setdefault(key, {})

    def __iter__(self):
        # Iterate the per-site dicts (works on Python 2 and 3 alike).
        return iter(self.sites.values())
class ConfiguredSite(client.Site):
    """A client.Site whose connection settings come from config files.

    Reads every file in *config_files*; when a 'name' keyword is given, the
    matching per-site section from the config's ``sites`` list overrides the
    global settings.  When both 'username' and 'password' are configured,
    site initialisation is deferred until after login.
    """
    def __init__(self, *config_files, **kwargs):
        self.config = read_config(config_files, sites = SiteList())
        if 'name' in kwargs:
            self.config.update(self.config['sites'][kwargs['name']])
        # Credentials present -> log in explicitly instead of do_init.
        do_login = 'username' in self.config and 'password' in self.config
        client.Site.__init__(self, host = self.config['host'],
                path = self.config['path'], ext = self.config.get('ext', '.php'),
                do_init = not do_login,
                retry_timeout = self.config.get('retry_timeout', 30),
                max_retries = self.config.get('max_retries', -1))
        if do_login:
            self.login(self.config['username'],
                    self.config['password'])
class ConfiguredPool(list):
    """A list of client.Site objects built from config files, all sharing
    one httpmw.HTTPPool connection pool.

    Global settings apply to every site; each entry of the config's
    ``sites`` list overrides them per site (and is updated in place with
    the merged result, exposed as ``site.config`` on each client).
    """
    def __init__(self, *config_files):
        self.config = read_config(config_files, sites = SiteList())
        self.pool = httpmw.HTTPPool()
        # Global (non-'sites') settings used as defaults for every site.
        if pythonver >= 3:
            config = dict([(k, v) for k, v in list(self.config.items()) if k != 'sites'])
        else:
            config = dict([(k, v) for k, v in self.config.iteritems() if k != 'sites'])
        for site in self.config['sites']:
            # Merge: per-site values win over the global defaults.
            cfg = config.copy()
            cfg.update(site)
            site.update(cfg)
            do_login = 'username' in site and 'password' in site
            self.append(client.Site(host = site['host'],
                    path = site['path'], ext = site.get('ext', '.php'),
                    pool = self.pool, do_init = not do_login,
                    retry_timeout = site.get('retry_timeout', 30),
                    max_retries = site.get('max_retries', -1)))
            if do_login:
                self[-1].login(site['username'], site['password'])
            self[-1].config = site
/* C Extension module to test all aspects of PEP-3118.
Written by Stefan Krah. */
#include "Python.h"
/* struct module */
static PyObject *structmodule = NULL;
static PyObject *Struct = NULL;
static PyObject *calcsize = NULL;
/* cache simple format string */
static const char *simple_fmt = "B";
static PyObject *simple_format = NULL;
#define SIMPLE_FORMAT(fmt) (fmt == NULL || strcmp(fmt, "B") == 0)
#define FIX_FORMAT(fmt) (fmt == NULL ? "B" : fmt)
/**************************************************************************/
/* NDArray Object */
/**************************************************************************/
static PyTypeObject NDArray_Type;
#define NDArray_Check(v) Py_IS_TYPE(v, &NDArray_Type)
#define CHECK_LIST_OR_TUPLE(v) \
do { \
if (!PyList_Check(v) && !PyTuple_Check(v)) { \
PyErr_SetString(PyExc_TypeError, \
#v " must be a list or a tuple"); \
return NULL; \
} \
} while (0)
#define PyMem_XFree(v) \
do { if (v) PyMem_Free(v); } while (0)
/* Maximum number of dimensions. */
#define ND_MAX_NDIM (2 * PyBUF_MAX_NDIM)
/* Check for the presence of suboffsets in the first dimension. */
#define HAVE_PTR(suboffsets) (suboffsets && suboffsets[0] >= 0)
/* Adjust ptr if suboffsets are present. */
#define ADJUST_PTR(ptr, suboffsets) \
(HAVE_PTR(suboffsets) ? *((char**)ptr) + suboffsets[0] : ptr)
/* Default: NumPy style (strides), read-only, no var-export, C-style layout */
#define ND_DEFAULT 0x000
/* User configurable flags for the ndarray */
#define ND_VAREXPORT 0x001 /* change layout while buffers are exported */
/* User configurable flags for each base buffer */
#define ND_WRITABLE 0x002 /* mark base buffer as writable */
#define ND_FORTRAN 0x004 /* Fortran contiguous layout */
#define ND_SCALAR 0x008 /* scalar: ndim = 0 */
#define ND_PIL 0x010 /* convert to PIL-style array (suboffsets) */
#define ND_REDIRECT 0x020 /* redirect buffer requests */
#define ND_GETBUF_FAIL 0x040 /* trigger getbuffer failure */
#define ND_GETBUF_UNDEFINED 0x080 /* undefined view.obj */
/* Internal flags for the base buffer */
#define ND_C 0x100 /* C contiguous layout (default) */
#define ND_OWN_ARRAYS 0x200 /* consumer owns arrays */
/* ndarray properties */
#define ND_IS_CONSUMER(nd) \
(((NDArrayObject *)nd)->head == &((NDArrayObject *)nd)->staticbuf)
/* ndbuf->flags properties */
#define ND_C_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_C)))
#define ND_FORTRAN_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_FORTRAN)))
#define ND_ANY_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_C|ND_FORTRAN)))
/* getbuffer() requests */
#define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
#define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
#define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
#define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
#define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
#define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
#define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
#define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
/* Single node of a list of base buffers. The list is needed to implement
changes in memory layout while exported buffers are active. */
static PyTypeObject NDArray_Type;
struct ndbuf;
typedef struct ndbuf {
struct ndbuf *next;
struct ndbuf *prev;
Py_ssize_t len; /* length of data */
Py_ssize_t offset; /* start of the array relative to data */
char *data; /* raw data */
int flags; /* capabilities of the base buffer */
Py_ssize_t exports; /* number of exports */
Py_buffer base; /* base buffer */
} ndbuf_t;
typedef struct {
PyObject_HEAD
int flags; /* ndarray flags */
ndbuf_t staticbuf; /* static buffer for re-exporting mode */
ndbuf_t *head; /* currently active base buffer */
} NDArrayObject;
/* Allocate a new base buffer holding nitems*itemsize raw bytes.  'offset'
   (must be a non-negative multiple of itemsize with room for at least one
   item) is where the exported array will start relative to the data.  The
   embedded Py_buffer is initialised as a 1-D byte view over the raw data.
   Returns NULL with an exception set on error. */
static ndbuf_t *
ndbuf_new(Py_ssize_t nitems, Py_ssize_t itemsize, Py_ssize_t offset, int flags)
{
    ndbuf_t *ndbuf;
    Py_buffer *base;
    Py_ssize_t len;
    len = nitems * itemsize;
    if (offset % itemsize) {
        PyErr_SetString(PyExc_ValueError,
            "offset must be a multiple of itemsize");
        return NULL;
    }
    if (offset < 0 || offset+itemsize > len) {
        PyErr_SetString(PyExc_ValueError, "offset out of bounds");
        return NULL;
    }
    ndbuf = PyMem_Malloc(sizeof *ndbuf);
    if (ndbuf == NULL) {
        PyErr_NoMemory();
        return NULL;
    }
    ndbuf->next = NULL;
    ndbuf->prev = NULL;
    ndbuf->len = len;
    ndbuf->offset= offset;
    ndbuf->data = PyMem_Malloc(len);
    if (ndbuf->data == NULL) {
        PyErr_NoMemory();
        PyMem_Free(ndbuf);
        return NULL;
    }
    ndbuf->flags = flags;
    ndbuf->exports = 0;
    /* 1-D unsigned-byte view over the raw data; shape/strides/format are
       filled in later by the caller. */
    base = &ndbuf->base;
    base->obj = NULL;
    base->buf = ndbuf->data;
    base->len = len;
    base->itemsize = 1;
    base->readonly = 0;
    base->format = NULL;
    base->ndim = 1;
    base->shape = NULL;
    base->strides = NULL;
    base->suboffsets = NULL;
    base->internal = ndbuf;
    return ndbuf;
}
/* Free a base buffer, its raw data and all heap-allocated Py_buffer
   members (format/shape/strides/suboffsets). */
static void
ndbuf_free(ndbuf_t *ndbuf)
{
    Py_buffer *base = &ndbuf->base;
    PyMem_XFree(ndbuf->data);
    PyMem_XFree(base->format);
    PyMem_XFree(base->shape);
    PyMem_XFree(base->strides);
    PyMem_XFree(base->suboffsets);
    PyMem_Free(ndbuf);
}
/* Prepend 'elt' to the ndarray's doubly-linked list of base buffers,
   making it the currently active base buffer (nd->head). */
static void
ndbuf_push(NDArrayObject *nd, ndbuf_t *elt)
{
    elt->next = nd->head;
    if (nd->head) nd->head->prev = elt;
    nd->head = elt;
    elt->prev = NULL;
}
/* Unlink 'elt' from the ndarray's base-buffer list and free it. */
static void
ndbuf_delete(NDArrayObject *nd, ndbuf_t *elt)
{
    if (elt->prev)
        elt->prev->next = elt->next;
    else
        nd->head = elt->next;
    if (elt->next)
        elt->next->prev = elt->prev;
    ndbuf_free(elt);
}
/* Remove and free the currently active (head) base buffer. */
static void
ndbuf_pop(NDArrayObject *nd)
{
    ndbuf_delete(nd, nd->head);
}
/* tp_new: create an empty ndarray with no base buffers attached.
   ndarray_init() supplies the first base; 'args' and 'kwds' are
   ignored here. */
static PyObject *
ndarray_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    NDArrayObject *nd;

    nd = PyObject_New(NDArrayObject, &NDArray_Type);
    if (nd == NULL)
        return NULL;

    nd->flags = 0;
    nd->head = NULL;
    return (PyObject *)nd;
}
/* tp_dealloc.  A consumer releases its single static buffer, first freeing
   the shape/strides/suboffsets arrays if it owns them (ND_OWN_ARRAYS).
   An exporter pops (and frees) every base buffer in its list. */
static void
ndarray_dealloc(PyObject *op)
{
    NDArrayObject *self = (NDArrayObject*)op;

    if (self->head) {
        if (ND_IS_CONSUMER(self)) {
            Py_buffer *base = &self->head->base;
            if (self->head->flags & ND_OWN_ARRAYS) {
                PyMem_XFree(base->shape);
                PyMem_XFree(base->strides);
                PyMem_XFree(base->suboffsets);
            }
            PyBuffer_Release(base);
        }
        else {
            while (self->head)
                ndbuf_pop(self);
        }
    }
    PyObject_Free(self);
}
/* Turn 'nd' into a consumer (re-exporter): fill the static buffer by
   requesting a buffer from 'exporter' with the given request flags.
   The ndbuf bookkeeping fields get sentinel values because the memory
   is owned by the exporter.  Returns -1 with an exception set if the
   buffer request fails. */
static int
ndarray_init_staticbuf(PyObject *exporter, NDArrayObject *nd, int flags)
{
    Py_buffer *base = &nd->staticbuf.base;

    if (PyObject_GetBuffer(exporter, base, flags) < 0)
        return -1;

    nd->head = &nd->staticbuf;
    nd->head->next = NULL;
    nd->head->prev = NULL;
    nd->head->len = -1;      /* sentinel: unused in consumer mode */
    nd->head->offset = -1;   /* sentinel: unused in consumer mode */
    nd->head->data = NULL;   /* exporter owns the memory */

    nd->head->flags = base->readonly ? 0 : ND_WRITABLE;
    nd->head->exports = 0;

    return 0;
}
/* Derive the shape-related flags (scalar, suboffset/PIL style, C and
   Fortran contiguity) from the base buffer and OR them into ndbuf->flags. */
static void
init_flags(ndbuf_t *ndbuf)
{
    Py_buffer *view = &ndbuf->base;
    int derived = 0;

    if (view->ndim == 0)
        derived |= ND_SCALAR;
    if (view->suboffsets != NULL)
        derived |= ND_PIL;
    if (PyBuffer_IsContiguous(view, 'C'))
        derived |= ND_C;
    if (PyBuffer_IsContiguous(view, 'F'))
        derived |= ND_FORTRAN;

    ndbuf->flags |= derived;
}
/****************************************************************************/
/* Buffer/List conversions */
/****************************************************************************/
static Py_ssize_t *strides_from_shape(const ndbuf_t *, int flags);
/* Get number of members in a struct: see issue #12740 */
/* Layout-compatible prefix of the struct module's Struct object,
   declared here only so that s_len can be read directly. */
typedef struct {
    PyObject_HEAD
    Py_ssize_t s_size;  /* total size of the format in bytes */
    Py_ssize_t s_len;   /* number of members in the format */
} PyPartialStructObject;
/* Return the number of members in the format of Struct object 's' by
   reinterpreting it as PyPartialStructObject and reading s_len. */
static Py_ssize_t
get_nmemb(PyObject *s)
{
    return ((PyPartialStructObject *)s)->s_len;
}
/* Pack all items into the buffer of 'obj'. The 'format' parameter must be
   in struct module syntax. For standard C types, a single item is an integer.
   For compound types, a single item is a tuple of integers.

   Returns 0 on success, -1 with an exception set on failure.  The args
   tuple (obj, offset, member0, ..., memberN-1) for struct.pack_into() is
   reused across iterations and mostly holds BORROWED references; the
   bookkeeping after the loop balances the refcounts before the tuple is
   decref'd. */
static int
pack_from_list(PyObject *obj, PyObject *items, PyObject *format,
               Py_ssize_t itemsize)
{
    PyObject *structobj, *pack_into;
    PyObject *args, *offset;
    PyObject *item, *tmp;
    Py_ssize_t nitems; /* number of items */
    Py_ssize_t nmemb;  /* number of members in a single item */
    Py_ssize_t i, j;
    int ret = 0;

    assert(PyObject_CheckBuffer(obj));
    assert(PyList_Check(items) || PyTuple_Check(items));

    structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL);
    if (structobj == NULL)
        return -1;

    nitems = PySequence_Fast_GET_SIZE(items);
    nmemb = get_nmemb(structobj);
    assert(nmemb >= 1);

    pack_into = PyObject_GetAttrString(structobj, "pack_into");
    if (pack_into == NULL) {
        Py_DECREF(structobj);
        return -1;
    }

    /* nmemb >= 1 */
    args = PyTuple_New(2 + nmemb);
    if (args == NULL) {
        Py_DECREF(pack_into);
        Py_DECREF(structobj);
        return -1;
    }

    offset = NULL;
    for (i = 0; i < nitems; i++) {
        /* Loop invariant: args[j] are borrowed references or NULL. */
        PyTuple_SET_ITEM(args, 0, obj);
        for (j = 1; j < 2+nmemb; j++)
            PyTuple_SET_ITEM(args, j, NULL);

        /* 'offset' is the only owned reference placed in the tuple; the
           previous iteration's offset is dropped before the new one is
           stored. */
        Py_XDECREF(offset);
        offset = PyLong_FromSsize_t(i*itemsize);
        if (offset == NULL) {
            ret = -1;
            break;
        }
        PyTuple_SET_ITEM(args, 1, offset);

        item = PySequence_Fast_GET_ITEM(items, i);
        if ((PyBytes_Check(item) || PyLong_Check(item) ||
             PyFloat_Check(item)) && nmemb == 1) {
            /* scalar item for a single-member format */
            PyTuple_SET_ITEM(args, 2, item);
        }
        else if ((PyList_Check(item) || PyTuple_Check(item)) &&
                 PySequence_Length(item) == nmemb) {
            /* compound item: spread its members over args[2..] */
            for (j = 0; j < nmemb; j++) {
                tmp = PySequence_Fast_GET_ITEM(item, j);
                PyTuple_SET_ITEM(args, 2+j, tmp);
            }
        }
        else {
            PyErr_SetString(PyExc_ValueError,
                "mismatch between initializer element and format string");
            ret = -1;
            break;
        }

        tmp = PyObject_CallObject(pack_into, args);
        if (tmp == NULL) {
            ret = -1;
            break;
        }
        Py_DECREF(tmp);
    }

    /* Balance the borrowed references still stored in the tuple so that
       Py_DECREF(args) does not over-decref them. */
    Py_INCREF(obj); /* args[0] */
    /* args[1]: offset is either NULL or should be dealloc'd */
    for (i = 2; i < 2+nmemb; i++) {
        tmp = PyTuple_GET_ITEM(args, i);
        Py_XINCREF(tmp);
    }
    Py_DECREF(args);

    Py_DECREF(pack_into);
    Py_DECREF(structobj);
    return ret;
}
/* Pack single element */
/* Pack a single item (scalar or tuple of members, depending on 'fmt')
   into the itemsize bytes at 'ptr', going through struct.pack_into() on
   a temporary writable memoryview.  A NULL fmt means unsigned bytes
   ("B").  Returns 0 on success, -1 with an exception set on failure. */
static int
pack_single(char *ptr, PyObject *item, const char *fmt, Py_ssize_t itemsize)
{
    PyObject *structobj = NULL, *pack_into = NULL, *args = NULL;
    PyObject *format = NULL, *mview = NULL, *zero = NULL;
    Py_ssize_t i, nmemb;
    int ret = -1;
    PyObject *x;

    if (fmt == NULL) fmt = "B";

    format = PyUnicode_FromString(fmt);
    if (format == NULL)
        goto out;

    structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL);
    if (structobj == NULL)
        goto out;

    nmemb = get_nmemb(structobj);
    assert(nmemb >= 1);

    mview = PyMemoryView_FromMemory(ptr, itemsize, PyBUF_WRITE);
    if (mview == NULL)
        goto out;

    zero = PyLong_FromLong(0);
    if (zero == NULL)
        goto out;

    pack_into = PyObject_GetAttrString(structobj, "pack_into");
    if (pack_into == NULL)
        goto out;

    /* args = (mview, 0, member0, ..., memberN-1); args[0] and args[1]
       hold references owned by this function, args[2..] are borrowed. */
    args = PyTuple_New(2+nmemb);
    if (args == NULL)
        goto out;

    PyTuple_SET_ITEM(args, 0, mview);
    PyTuple_SET_ITEM(args, 1, zero);

    if ((PyBytes_Check(item) || PyLong_Check(item) ||
         PyFloat_Check(item)) && nmemb == 1) {
        PyTuple_SET_ITEM(args, 2, item);
    }
    else if ((PyList_Check(item) || PyTuple_Check(item)) &&
             PySequence_Length(item) == nmemb) {
        for (i = 0; i < nmemb; i++) {
            x = PySequence_Fast_GET_ITEM(item, i);
            PyTuple_SET_ITEM(args, 2+i, x);
        }
    }
    else {
        PyErr_SetString(PyExc_ValueError,
            "mismatch between initializer element and format string");
        goto args_out;
    }

    x = PyObject_CallObject(pack_into, args);
    if (x != NULL) {
        Py_DECREF(x);
        ret = 0;
    }

args_out:
    /* Incref every stored item so Py_XDECREF(args) balances out; the
       owned mview/zero references are then dropped in 'out'. */
    for (i = 0; i < 2+nmemb; i++)
        Py_XINCREF(PyTuple_GET_ITEM(args, i));
    Py_XDECREF(args);
out:
    Py_XDECREF(pack_into);
    Py_XDECREF(zero);
    Py_XDECREF(mview);
    Py_XDECREF(structobj);
    Py_XDECREF(format);
    return ret;
}
/* Recursively copy an ndim-dimensional array from sptr to dptr.  In the
   one-dimensional base case a fully contiguous row is moved with
   memmove(); otherwise the whole source row is staged item-by-item
   through the scratch buffer 'mem' before being written out, which also
   keeps overlapping source/destination rows safe. */
static void
copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
         char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
         char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
         char *mem)
{
    Py_ssize_t i;

    assert(ndim >= 1);

    if (ndim == 1) {
        if (!HAVE_PTR(dsuboffsets) && !HAVE_PTR(ssuboffsets) &&
            dstrides[0] == itemsize && sstrides[0] == itemsize) {
            memmove(dptr, sptr, shape[0] * itemsize);
        }
        else {
            char *p;
            assert(mem != NULL);
            /* stage the source row into mem ... */
            for (i=0, p=mem; i<shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
                char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
                memcpy(p, xsptr, itemsize);
            }
            /* ... then write it to the destination */
            for (i=0, p=mem; i<shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
                char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
                memcpy(xdptr, p, itemsize);
            }
        }
        return;
    }

    /* recurse over the first dimension */
    for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
        char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
        char *xsptr = ADJUST_PTR(sptr, ssuboffsets);

        copy_rec(shape+1, ndim-1, itemsize,
                 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
                 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
                 mem);
    }
}
/* Return 0 if dest and src agree on format, itemsize, ndim and shape,
   -1 otherwise.  A zero-length dimension makes the remaining dimensions
   irrelevant, so the shape comparison stops there. */
static int
cmp_structure(Py_buffer *dest, Py_buffer *src)
{
    Py_ssize_t n;

    if (dest->itemsize != src->itemsize)
        return -1;
    if (dest->ndim != src->ndim)
        return -1;
    if (strcmp(FIX_FORMAT(dest->format), FIX_FORMAT(src->format)) != 0)
        return -1;

    for (n = 0; n < dest->ndim; n++) {
        if (dest->shape[n] != src->shape[n])
            return -1;
        if (dest->shape[n] == 0)
            break;  /* empty array: nothing more to compare */
    }

    return 0;
}
/* Copy src to dest. Both buffers must have the same format, itemsize,
   ndim and shape. Copying is atomic, the function never fails with
   a partial copy.

   A scratch buffer for one row is allocated only when the innermost
   dimension is not plainly contiguous (suboffsets in use, or the last
   stride differs from itemsize).  Returns 0 on success, -1 with an
   exception set on structure mismatch or out-of-memory. */
static int
copy_buffer(Py_buffer *dest, Py_buffer *src)
{
    char *mem = NULL;

    assert(dest->ndim > 0);

    if (cmp_structure(dest, src) < 0) {
        PyErr_SetString(PyExc_ValueError,
            "ndarray assignment: lvalue and rvalue have different structures");
        return -1;
    }

    if ((dest->suboffsets && dest->suboffsets[dest->ndim-1] >= 0) ||
        (src->suboffsets && src->suboffsets[src->ndim-1] >= 0) ||
        dest->strides[dest->ndim-1] != dest->itemsize ||
        src->strides[src->ndim-1] != src->itemsize) {
        /* scratch space for one innermost row */
        mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
        if (mem == NULL) {
            PyErr_NoMemory();
            return -1;
        }
    }

    copy_rec(dest->shape, dest->ndim, dest->itemsize,
             dest->buf, dest->strides, dest->suboffsets,
             src->buf, src->strides, src->suboffsets,
             mem);

    PyMem_XFree(mem);
    return 0;
}
/* Unpack single element */
/* Unpack the itemsize bytes at 'ptr' via struct.unpack_from() on a
   read-only memoryview.  A NULL fmt means unsigned bytes ("B", one byte).
   Single-member results are unwrapped from their 1-tuple; multi-member
   results are returned as the tuple itself.  Returns NULL with an
   exception set on failure. */
static PyObject *
unpack_single(char *ptr, const char *fmt, Py_ssize_t itemsize)
{
    PyObject *x, *unpack_from, *mview;

    if (fmt == NULL) {
        fmt = "B";
        itemsize = 1;
    }

    unpack_from = PyObject_GetAttrString(structmodule, "unpack_from");
    if (unpack_from == NULL)
        return NULL;

    mview = PyMemoryView_FromMemory(ptr, itemsize, PyBUF_READ);
    if (mview == NULL) {
        Py_DECREF(unpack_from);
        return NULL;
    }

    x = PyObject_CallFunction(unpack_from, "sO", fmt, mview);
    Py_DECREF(unpack_from);
    Py_DECREF(mview);
    if (x == NULL)
        return NULL;

    /* unwrap single-member results */
    if (PyTuple_GET_SIZE(x) == 1) {
        PyObject *tmp = PyTuple_GET_ITEM(x, 0);
        Py_INCREF(tmp);
        Py_DECREF(x);
        return tmp;
    }

    return x;
}
/* Unpack a multi-dimensional matrix into a nested list. Return a scalar
   for ndim = 0.

   'mview' is a pre-built memoryview over the scratch buffer 'item'; in
   the base case one element is copied into the scratch buffer and
   unpacked from there, so the bound unpack_from callable can be reused
   for every element. */
static PyObject *
unpack_rec(PyObject *unpack_from, char *ptr, PyObject *mview, char *item,
           const Py_ssize_t *shape, const Py_ssize_t *strides,
           const Py_ssize_t *suboffsets, Py_ssize_t ndim, Py_ssize_t itemsize)
{
    PyObject *lst, *x;
    Py_ssize_t i;

    assert(ndim >= 0);
    assert(shape != NULL);
    assert(strides != NULL);

    if (ndim == 0) {
        memcpy(item, ptr, itemsize);
        x = PyObject_CallFunctionObjArgs(unpack_from, mview, NULL);
        if (x == NULL)
            return NULL;
        /* unwrap single-member results */
        if (PyTuple_GET_SIZE(x) == 1) {
            PyObject *tmp = PyTuple_GET_ITEM(x, 0);
            Py_INCREF(tmp);
            Py_DECREF(x);
            return tmp;
        }
        return x;
    }

    lst = PyList_New(shape[0]);
    if (lst == NULL)
        return NULL;

    for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
        char *nextptr = ADJUST_PTR(ptr, suboffsets);

        x = unpack_rec(unpack_from, nextptr, mview, item,
                       shape+1, strides+1, suboffsets ? suboffsets+1 : NULL,
                       ndim-1, itemsize);
        if (x == NULL) {
            Py_DECREF(lst);
            return NULL;
        }

        PyList_SET_ITEM(lst, i, x);
    }

    return lst;
}
/* Unpack the active base buffer into a nested list of Python objects
   (a scalar for ndim = 0).  The buffer must carry a struct-module format
   string; format=NULL buffers are rejected (use tobytes() instead).
   Shape-less (simple) exporters are treated as 1-D byte arrays, and
   missing strides are synthesized from the shape for C-contiguous
   buffers.  Returns NULL with an exception set on failure.

   Fix: removed a redundant re-assignment of 'base' that merely repeated
   its initializer. */
static PyObject *
ndarray_as_list(NDArrayObject *nd)
{
    PyObject *structobj = NULL, *unpack_from = NULL;
    PyObject *lst = NULL, *mview = NULL;
    Py_buffer *base = &nd->head->base;
    Py_ssize_t *shape = base->shape;
    Py_ssize_t *strides = base->strides;
    Py_ssize_t simple_shape[1];
    Py_ssize_t simple_strides[1];
    char *item = NULL;
    PyObject *format;
    char *fmt = base->format;

    if (fmt == NULL) {
        PyErr_SetString(PyExc_ValueError,
            "ndarray: tolist() does not support format=NULL, use "
            "tobytes()");
        return NULL;
    }
    if (shape == NULL) {
        /* simple exporter: treat as a 1-D array of items */
        assert(ND_C_CONTIGUOUS(nd->head->flags));
        assert(base->strides == NULL);
        assert(base->ndim <= 1);
        shape = simple_shape;
        shape[0] = base->len;
        strides = simple_strides;
        strides[0] = base->itemsize;
    }
    else if (strides == NULL) {
        /* C-contiguous with shape only: synthesize strides */
        assert(ND_C_CONTIGUOUS(nd->head->flags));
        strides = strides_from_shape(nd->head, 0);
        if (strides == NULL)
            return NULL;
    }

    format = PyUnicode_FromString(fmt);
    if (format == NULL)
        goto out;
    structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL);
    Py_DECREF(format);
    if (structobj == NULL)
        goto out;
    unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
    if (unpack_from == NULL)
        goto out;

    /* scratch buffer for a single item, wrapped in a reusable memoryview */
    item = PyMem_Malloc(base->itemsize);
    if (item == NULL) {
        PyErr_NoMemory();
        goto out;
    }
    mview = PyMemoryView_FromMemory(item, base->itemsize, PyBUF_WRITE);
    if (mview == NULL)
        goto out;

    lst = unpack_rec(unpack_from, base->buf, mview, item,
                     shape, strides, base->suboffsets,
                     base->ndim, base->itemsize);

out:
    Py_XDECREF(mview);
    PyMem_XFree(item);
    Py_XDECREF(unpack_from);
    Py_XDECREF(structobj);
    /* free strides only if they were synthesized above */
    if (strides != base->strides && strides != simple_strides)
        PyMem_XFree(strides);

    return lst;
}
/****************************************************************************/
/* Initialize ndbuf */
/****************************************************************************/
/*
State of a new ndbuf during initialization. 'OK' means that initialization
is complete. 'PTR' means that a pointer has been initialized, but the
state of the memory is still undefined and ndbuf->offset is disregarded.
+-----------------+-----------+-------------+----------------+
| | ndbuf_new | init_simple | init_structure |
+-----------------+-----------+-------------+----------------+
| next | OK (NULL) | OK | OK |
+-----------------+-----------+-------------+----------------+
| prev | OK (NULL) | OK | OK |
+-----------------+-----------+-------------+----------------+
| len | OK | OK | OK |
+-----------------+-----------+-------------+----------------+
| offset | OK | OK | OK |
+-----------------+-----------+-------------+----------------+
| data | PTR | OK | OK |
+-----------------+-----------+-------------+----------------+
| flags | user | user | OK |
+-----------------+-----------+-------------+----------------+
| exports | OK (0) | OK | OK |
+-----------------+-----------+-------------+----------------+
| base.obj | OK (NULL) | OK | OK |
+-----------------+-----------+-------------+----------------+
| base.buf | PTR | PTR | OK |
+-----------------+-----------+-------------+----------------+
| base.len | len(data) | len(data) | OK |
+-----------------+-----------+-------------+----------------+
| base.itemsize | 1 | OK | OK |
+-----------------+-----------+-------------+----------------+
| base.readonly | 0 | OK | OK |
+-----------------+-----------+-------------+----------------+
| base.format | NULL | OK | OK |
+-----------------+-----------+-------------+----------------+
| base.ndim | 1 | 1 | OK |
+-----------------+-----------+-------------+----------------+
| base.shape | NULL | NULL | OK |
+-----------------+-----------+-------------+----------------+
| base.strides | NULL | NULL | OK |
+-----------------+-----------+-------------+----------------+
| base.suboffsets | NULL | NULL | OK |
+-----------------+-----------+-------------+----------------+
| base.internal | OK | OK | OK |
+-----------------+-----------+-------------+----------------+
*/
/* Return struct.calcsize(format): the item size in bytes for a struct
   module format string.  Returns -1 with an exception set on error
   (PyLong_AsSsize_t's -1 error return is passed straight through;
   callers treat any value <= 0 as invalid). */
static Py_ssize_t
get_itemsize(PyObject *format)
{
    PyObject *tmp;
    Py_ssize_t itemsize;

    tmp = PyObject_CallFunctionObjArgs(calcsize, format, NULL);
    if (tmp == NULL)
        return -1;
    itemsize = PyLong_AsSsize_t(tmp);
    Py_DECREF(tmp);

    return itemsize;
}
/* Return a PyMem-allocated C copy of the format string (which must be
   ASCII-encodable).  Returns NULL with an exception set on error. */
static char *
get_format(PyObject *format)
{
    PyObject *tmp;
    char *fmt;

    tmp = PyUnicode_AsASCIIString(format);
    if (tmp == NULL)
        return NULL;
    fmt = PyMem_Malloc(PyBytes_GET_SIZE(tmp)+1);
    if (fmt == NULL) {
        PyErr_NoMemory();
        Py_DECREF(tmp);
        return NULL;
    }
    strcpy(fmt, PyBytes_AS_STRING(tmp));
    Py_DECREF(tmp);

    return fmt;
}
/* Fill the raw data of 'ndbuf' by packing 'items' through a temporary
   memoryview, then record readonly, itemsize and format on the base
   buffer (completing the 'init_simple' column of the state table above).
   Returns 0 on success, -1 with an exception set on failure. */
static int
init_simple(ndbuf_t *ndbuf, PyObject *items, PyObject *format,
            Py_ssize_t itemsize)
{
    PyObject *mview;
    Py_buffer *base = &ndbuf->base;
    int ret;

    mview = PyMemoryView_FromBuffer(base);
    if (mview == NULL)
        return -1;

    ret = pack_from_list(mview, items, format, itemsize);
    Py_DECREF(mview);
    if (ret < 0)
        return -1;

    base->readonly = !(ndbuf->flags & ND_WRITABLE);
    base->itemsize = itemsize;
    base->format = get_format(format);
    if (base->format == NULL)
        return -1;

    return 0;
}
/* Convert a sequence of 'len' Python ints into a PyMem-allocated
   Py_ssize_t array.  With is_shape set, negative values are rejected
   (shape entries must be >= 0); strides may be negative.  Returns NULL
   with an exception set on error. */
static Py_ssize_t *
seq_as_ssize_array(PyObject *seq, Py_ssize_t len, int is_shape)
{
    Py_ssize_t *dest;
    Py_ssize_t x, i;

    /* ndim = len <= ND_MAX_NDIM, so PyMem_New() is actually not needed. */
    dest = PyMem_New(Py_ssize_t, len);
    if (dest == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    for (i = 0; i < len; i++) {
        PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
        if (!PyLong_Check(tmp)) {
            PyErr_Format(PyExc_ValueError,
                "elements of %s must be integers",
                is_shape ? "shape" : "strides");
            PyMem_Free(dest);
            return NULL;
        }
        x = PyLong_AsSsize_t(tmp);
        if (PyErr_Occurred()) {
            PyMem_Free(dest);
            return NULL;
        }
        if (is_shape && x < 0) {
            PyErr_Format(PyExc_ValueError,
                "elements of shape must be integers >= 0");
            PyMem_Free(dest);
            return NULL;
        }

        dest[i] = x;
    }

    return dest;
}
/* Compute contiguous strides from base->shape and itemsize: Fortran
   order (first dimension varies fastest) if ND_FORTRAN is in 'flags',
   C order otherwise.  Returns a PyMem-allocated array, or NULL with
   an exception set on allocation failure.
   NOTE(review): the C-order branch writes s[ndim-1] first, so this
   assumes base->ndim >= 1 — both visible call sites only reach it with
   a non-degenerate shape; confirm before adding new callers. */
static Py_ssize_t *
strides_from_shape(const ndbuf_t *ndbuf, int flags)
{
    const Py_buffer *base = &ndbuf->base;
    Py_ssize_t *s, i;

    s = PyMem_Malloc(base->ndim * (sizeof *s));
    if (s == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    if (flags & ND_FORTRAN) {
        s[0] = base->itemsize;
        for (i = 1; i < base->ndim; i++)
            s[i] = s[i-1] * base->shape[i-1];
    }
    else {
        s[base->ndim-1] = base->itemsize;
        for (i = base->ndim-2; i >= 0; i--)
            s[i] = s[i+1] * base->shape[i+1];
    }

    return s;
}
/* Bounds check:

     len := complete length of allocated memory
     offset := start of the array

   A single array element is indexed by:

     i = indices[0] * strides[0] + indices[1] * strides[1] + ...

   imin is reached when all indices[n] combined with positive strides are 0
   and all indices combined with negative strides are shape[n]-1, which is
   the maximum index for the nth dimension.

   imax is reached when all indices[n] combined with negative strides are 0
   and all indices combined with positive strides are shape[n]-1.

   Returns 0 if every reachable element lies inside [0, len), -1 with a
   ValueError otherwise.  Strides must be multiples of itemsize; any
   zero-length dimension makes the array empty and trivially valid. */
static int
verify_structure(Py_ssize_t len, Py_ssize_t itemsize, Py_ssize_t offset,
                 const Py_ssize_t *shape, const Py_ssize_t *strides,
                 Py_ssize_t ndim)
{
    Py_ssize_t imin, imax;
    Py_ssize_t n;

    assert(ndim >= 0);

    if (ndim == 0 && (offset < 0 || offset+itemsize > len))
        goto invalid_combination;

    for (n = 0; n < ndim; n++)
        if (strides[n] % itemsize) {
            PyErr_SetString(PyExc_ValueError,
                "strides must be a multiple of itemsize");
            return -1;
        }

    for (n = 0; n < ndim; n++)
        if (shape[n] == 0)
            return 0;  /* empty array: nothing is addressable */

    imin = imax = 0;
    for (n = 0; n < ndim; n++)
        if (strides[n] <= 0)
            imin += (shape[n]-1) * strides[n];
        else
            imax += (shape[n]-1) * strides[n];

    if (imin + offset < 0 || imax + offset + itemsize > len)
        goto invalid_combination;

    return 0;


invalid_combination:
    PyErr_SetString(PyExc_ValueError,
        "invalid combination of buffer, shape and strides");
    return -1;
}
/*
Convert a NumPy-style array to an array using suboffsets to stride in
the first dimension. Requirements: ndim > 0.
Contiguous example
==================
Input:
------
shape = {2, 2, 3};
strides = {6, 3, 1};
suboffsets = NULL;
data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
buf = &data[0]
Output:
-------
shape = {2, 2, 3};
strides = {sizeof(char *), 3, 1};
suboffsets = {0, -1, -1};
data = {p1, p2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
| | ^ ^
`---'---' |
| |
`---------------------'
buf = &data[0]
So, in the example the input resembles the three-dimensional array
char v[2][2][3], while the output resembles an array of two pointers
to two-dimensional arrays: char (*v[2])[2][3].
Non-contiguous example:
=======================
Input (with offset and negative strides):
-----------------------------------------
shape = {2, 2, 3};
strides = {-6, 3, -1};
offset = 8
suboffsets = NULL;
data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
Output:
-------
shape = {2, 2, 3};
strides = {-sizeof(char *), 3, -1};
suboffsets = {2, -1, -1};
newdata = {p1, p2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
| | ^ ^ ^ ^
`---'---' | | `- p2+suboffsets[0]
| `-----------|--- p1+suboffsets[0]
`---------------------'
buf = &newdata[1] # striding backwards over the pointers.
suboffsets[0] is the same as the offset that one would specify if
the two {2, 3} subarrays were created directly, hence the name.
*/
/* Convert the (NumPy-style) base buffer to the suboffset (PIL-style)
   representation described in the comment above: the data block is
   reallocated with a leading, 8-byte-aligned array of shape[0] pointers
   into the element data, strides[0] becomes +/- sizeof(char *), and
   suboffsets[0] carries the per-subarray offset.  Requires ndim > 0 and
   no existing suboffsets.  Returns 0 on success, -1 with an exception
   set on allocation failure. */
static int
init_suboffsets(ndbuf_t *ndbuf)
{
    Py_buffer *base = &ndbuf->base;
    Py_ssize_t start, step;
    Py_ssize_t imin, suboffset0;
    Py_ssize_t addsize;
    Py_ssize_t n;
    char *data;

    assert(base->ndim > 0);
    assert(base->suboffsets == NULL);

    /* Allocate new data with additional space for shape[0] pointers. */
    addsize = base->shape[0] * (sizeof (char *));

    /* Align array start to a multiple of 8. */
    addsize = 8 * ((addsize + 7) / 8);

    data = PyMem_Malloc(ndbuf->len + addsize);
    if (data == NULL) {
        PyErr_NoMemory();
        return -1;
    }

    /* old element data now lives after the pointer table */
    memcpy(data + addsize, ndbuf->data, ndbuf->len);

    PyMem_Free(ndbuf->data);
    ndbuf->data = data;
    ndbuf->len += addsize;
    base->buf = ndbuf->data;

    /* imin: minimum index of the input array relative to ndbuf->offset.
       suboffset0: offset for each sub-array of the output. This is the
       same as calculating -imin' for a sub-array of ndim-1. */
    imin = suboffset0 = 0;
    for (n = 0; n < base->ndim; n++) {
        if (base->shape[n] == 0)
            break;
        if (base->strides[n] <= 0) {
            Py_ssize_t x = (base->shape[n]-1) * base->strides[n];
            imin += x;
            suboffset0 += (n >= 1) ? -x : 0;
        }
    }

    /* Initialize the array of pointers to the sub-arrays. */
    start = addsize + ndbuf->offset + imin;
    step = base->strides[0] < 0 ? -base->strides[0] : base->strides[0];

    for (n = 0; n < base->shape[0]; n++)
        ((char **)base->buf)[n] = (char *)base->buf + start + n*step;

    /* Initialize suboffsets. */
    base->suboffsets = PyMem_Malloc(base->ndim * (sizeof *base->suboffsets));
    if (base->suboffsets == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    base->suboffsets[0] = suboffset0;
    for (n = 1; n < base->ndim; n++)
        base->suboffsets[n] = -1;

    /* Adjust strides for the first (zeroth) dimension. */
    if (base->strides[0] >= 0) {
        base->strides[0] = sizeof(char *);
    }
    else {
        /* Striding backwards. */
        base->strides[0] = -(Py_ssize_t)sizeof(char *);
        if (base->shape[0] > 0)
            base->buf = (char *)base->buf + (base->shape[0]-1) * sizeof(char *);
    }

    /* the pointer-table layout is neither C- nor Fortran-contiguous */
    ndbuf->flags &= ~(ND_C|ND_FORTRAN);

    ndbuf->offset = 0;
    return 0;
}
/* Set base->len = product(shape) * itemsize. */
static void
init_len(Py_buffer *base)
{
    Py_ssize_t nbytes = base->itemsize;
    Py_ssize_t n;

    for (n = 0; n < base->ndim; n++)
        nbytes *= base->shape[n];

    base->len = nbytes;
}
/* Attach shape/strides to an ndbuf that already holds packed data
   (the 'init_structure' column of the state table above).  Missing
   strides are synthesized from the shape; the result is bounds-checked
   against the raw data length before buf and len are finalized.
   Returns 0 on success, -1 with an exception set on failure. */
static int
init_structure(ndbuf_t *ndbuf, PyObject *shape, PyObject *strides,
               Py_ssize_t ndim)
{
    Py_buffer *base = &ndbuf->base;

    base->ndim = (int)ndim;
    if (ndim == 0) {
        if (ndbuf->flags & ND_PIL) {
            PyErr_SetString(PyExc_TypeError,
                "ndim = 0 cannot be used in conjunction with ND_PIL");
            return -1;
        }
        ndbuf->flags |= (ND_SCALAR|ND_C|ND_FORTRAN);
        return 0;
    }

    /* shape */
    base->shape = seq_as_ssize_array(shape, ndim, 1);
    if (base->shape == NULL)
        return -1;

    /* strides */
    if (strides) {
        base->strides = seq_as_ssize_array(strides, ndim, 0);
    }
    else {
        base->strides = strides_from_shape(ndbuf, ndbuf->flags);
    }
    if (base->strides == NULL)
        return -1;
    /* base->len is still the raw data length here (set by ndbuf_new) */
    if (verify_structure(base->len, base->itemsize, ndbuf->offset,
                         base->shape, base->strides, ndim) < 0)
        return -1;

    /* buf */
    base->buf = ndbuf->data + ndbuf->offset;

    /* len */
    init_len(base);

    /* ndbuf->flags */
    if (PyBuffer_IsContiguous(base, 'C'))
        ndbuf->flags |= ND_C;
    if (PyBuffer_IsContiguous(base, 'F'))
        ndbuf->flags |= ND_FORTRAN;


    /* convert numpy array to suboffset representation */
    if (ndbuf->flags & ND_PIL) {
        /* modifies base->buf, base->strides and base->suboffsets **/
        return init_suboffsets(ndbuf);
    }

    return 0;
}
/* Build a complete ndbuf from an initializer ('items': scalar, list or
   tuple), a shape, optional strides, an offset into the raw data, a
   struct-module format and flags.  Validates the arguments, packs the
   items (init_simple) and attaches the structure (init_structure).
   Returns NULL with an exception set on failure.
   NOTE(review): CHECK_LIST_OR_TUPLE is a macro defined elsewhere in this
   file — presumably it sets an exception and returns NULL on mismatch;
   confirm against its definition. */
static ndbuf_t *
init_ndbuf(PyObject *items, PyObject *shape, PyObject *strides,
           Py_ssize_t offset, PyObject *format, int flags)
{
    ndbuf_t *ndbuf;
    Py_ssize_t ndim;
    Py_ssize_t nitems;
    Py_ssize_t itemsize;

    /* ndim = len(shape) */
    CHECK_LIST_OR_TUPLE(shape);
    ndim = PySequence_Fast_GET_SIZE(shape);
    if (ndim > ND_MAX_NDIM) {
        PyErr_Format(PyExc_ValueError,
            "ndim must not exceed %d", ND_MAX_NDIM);
        return NULL;
    }

    /* len(strides) = len(shape) */
    if (strides) {
        CHECK_LIST_OR_TUPLE(strides);
        if (PySequence_Fast_GET_SIZE(strides) == 0)
            strides = NULL;  /* empty strides sequence means "default" */
        else if (flags & ND_FORTRAN) {
            PyErr_SetString(PyExc_TypeError,
                "ND_FORTRAN cannot be used together with strides");
            return NULL;
        }
        else if (PySequence_Fast_GET_SIZE(strides) != ndim) {
            PyErr_SetString(PyExc_ValueError,
                "len(shape) != len(strides)");
            return NULL;
        }
    }

    /* itemsize */
    itemsize = get_itemsize(format);
    if (itemsize <= 0) {
        if (itemsize == 0) {
            PyErr_SetString(PyExc_ValueError,
                "itemsize must not be zero");
        }
        return NULL;
    }

    /* convert scalar to list */
    if (ndim == 0) {
        items = PyTuple_Pack(1, items);
        if (items == NULL)
            return NULL;
    }
    else {
        CHECK_LIST_OR_TUPLE(items);
        Py_INCREF(items);  /* owned reference from here on */
    }

    /* number of items */
    nitems = PySequence_Fast_GET_SIZE(items);
    if (nitems == 0) {
        PyErr_SetString(PyExc_ValueError,
            "initializer list or tuple must not be empty");
        Py_DECREF(items);
        return NULL;
    }

    ndbuf = ndbuf_new(nitems, itemsize, offset, flags);
    if (ndbuf == NULL) {
        Py_DECREF(items);
        return NULL;
    }


    if (init_simple(ndbuf, items, format, itemsize) < 0)
        goto error;
    if (init_structure(ndbuf, shape, strides, ndim) < 0)
        goto error;

    Py_DECREF(items);
    return ndbuf;

error:
    Py_DECREF(items);
    ndbuf_free(ndbuf);
    return NULL;
}
/* initialize and push a new base onto the linked list */
/* Build an ndbuf from the given initializer arguments and make it the
   active head of 'nd'.  Returns 0 on success, -1 with an exception set
   on failure (nothing is pushed in that case). */
static int
ndarray_push_base(NDArrayObject *nd, PyObject *items,
                  PyObject *shape, PyObject *strides,
                  Py_ssize_t offset, PyObject *format, int flags)
{
    ndbuf_t *ndbuf;

    ndbuf = init_ndbuf(items, shape, strides, offset, format, flags);
    if (ndbuf == NULL)
        return -1;

    ndbuf_push(nd, ndbuf);
    return 0;
}
#define PyBUF_UNUSED 0x10000
/* tp_init.  Two modes:
   1. 'obj' is a buffer exporter and no shape is given: become a consumer
      (re-exporter) of that buffer; only 'getbuf' and 'flags' may also be
      passed.
   2. 'obj' is a scalar/list/tuple initializer: become an original
      exporter; 'shape' is then required, 'getbuf' is invalid, and
      ND_VAREXPORT is split off into the ndarray-level flags. */
static int
ndarray_init(PyObject *self, PyObject *args, PyObject *kwds)
{
    NDArrayObject *nd = (NDArrayObject *)self;
    static char *kwlist[] = {
        "obj", "shape", "strides", "offset", "format", "flags", "getbuf", NULL
    };
    PyObject *v = NULL;  /* initializer: scalar, list, tuple or base object */
    PyObject *shape = NULL;   /* size of each dimension */
    PyObject *strides = NULL; /* number of bytes to the next elt in each dim */
    Py_ssize_t offset = 0;            /* buffer offset */
    PyObject *format = simple_format; /* struct module specifier: "B" */
    int flags = ND_DEFAULT;           /* base buffer and ndarray flags */

    int getbuf = PyBUF_UNUSED; /* re-exporter: getbuffer request flags */


    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OOnOii", kwlist,
            &v, &shape, &strides, &offset, &format, &flags, &getbuf))
        return -1;

    /* NDArrayObject is re-exporter */
    if (PyObject_CheckBuffer(v) && shape == NULL) {
        if (strides || offset || format != simple_format ||
            !(flags == ND_DEFAULT || flags == ND_REDIRECT)) {
            PyErr_SetString(PyExc_TypeError,
                "construction from exporter object only takes 'obj', 'getbuf' "
                "and 'flags' arguments");
            return -1;
        }

        /* default to a full read-only buffer request */
        getbuf = (getbuf == PyBUF_UNUSED) ? PyBUF_FULL_RO : getbuf;

        if (ndarray_init_staticbuf(v, nd, getbuf) < 0)
            return -1;

        init_flags(nd->head);
        nd->head->flags |= flags;

        return 0;
    }

    /* NDArrayObject is the original base object. */
    if (getbuf != PyBUF_UNUSED) {
        PyErr_SetString(PyExc_TypeError,
            "getbuf argument only valid for construction from exporter "
            "object");
        return -1;
    }
    if (shape == NULL) {
        PyErr_SetString(PyExc_TypeError,
            "shape is a required argument when constructing from "
            "list, tuple or scalar");
        return -1;
    }

    /* ND_VAREXPORT is an ndarray-level property, not a base-buffer one */
    if (flags & ND_VAREXPORT) {
        nd->flags |= ND_VAREXPORT;
        flags &= ~ND_VAREXPORT;
    }

    /* Initialize and push the first base buffer onto the linked list. */
    return ndarray_push_base(nd, v, shape, strides, offset, format, flags);
}
/* Push an additional base onto the linked list. */
/* push(items, shape, strides=..., offset=..., format=..., flags=...):
   only valid for exporters (not consumers), and — unless the ndarray was
   created with ND_VAREXPORT — only while no buffers are exported from
   the current head. */
static PyObject *
ndarray_push(PyObject *self, PyObject *args, PyObject *kwds)
{
    NDArrayObject *nd = (NDArrayObject *)self;
    static char *kwlist[] = {
        "items", "shape", "strides", "offset", "format", "flags", NULL
    };
    PyObject *items = NULL;   /* initializer: scalar, list or tuple */
    PyObject *shape = NULL;   /* size of each dimension */
    PyObject *strides = NULL; /* number of bytes to the next elt in each dim */
    PyObject *format = simple_format;  /* struct module specifier: "B" */
    Py_ssize_t offset = 0;             /* buffer offset */
    int flags = ND_DEFAULT;            /* base buffer flags */

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|OnOi", kwlist,
            &items, &shape, &strides, &offset, &format, &flags))
        return NULL;

    if (flags & ND_VAREXPORT) {
        PyErr_SetString(PyExc_ValueError,
            "ND_VAREXPORT flag can only be used during object creation");
        return NULL;
    }
    if (ND_IS_CONSUMER(nd)) {
        PyErr_SetString(PyExc_BufferError,
            "structure of re-exporting object is immutable");
        return NULL;
    }
    if (!(nd->flags&ND_VAREXPORT) && nd->head->exports > 0) {
        PyErr_Format(PyExc_BufferError,
            "cannot change structure: %zd exported buffer%s",
            nd->head->exports, nd->head->exports==1 ? "" : "s");
        return NULL;
    }

    if (ndarray_push_base(nd, items, shape, strides,
                          offset, format, flags) < 0)
        return NULL;
    Py_RETURN_NONE;
}
/* Pop a base from the linked list (if possible). */
/* Fails for consumers, while the head still has exported buffers, or
   when only a single base remains.  Returns None on success. */
static PyObject *
ndarray_pop(PyObject *self, PyObject *dummy)
{
    NDArrayObject *nd = (NDArrayObject *)self;
    if (ND_IS_CONSUMER(nd)) {
        PyErr_SetString(PyExc_BufferError,
            "structure of re-exporting object is immutable");
        return NULL;
    }
    if (nd->head->exports > 0) {
        PyErr_Format(PyExc_BufferError,
            "cannot change structure: %zd exported buffer%s",
            nd->head->exports, nd->head->exports==1 ? "" : "s");
        return NULL;
    }
    if (nd->head->next == NULL) {
        PyErr_SetString(PyExc_BufferError,
            "list only has a single base");
        return NULL;
    }

    ndbuf_pop(nd);
    Py_RETURN_NONE;
}
/**************************************************************************/
/* getbuffer */
/**************************************************************************/
/* bf_getbuffer: answer a PEP-3118 buffer request against the active base
   buffer, honoring the request 'flags' (format/shape/strides/contiguity/
   writability) and the test-control flags on the base (ND_REDIRECT,
   ND_GETBUF_FAIL, ND_GETBUF_UNDEFINED).  On success fills *view, takes a
   reference to self and bumps the export count.  Returns -1 with an
   exception set on any refused request. */
static int
ndarray_getbuf(PyObject *op, Py_buffer *view, int flags)
{
    NDArrayObject *self = (NDArrayObject*)op;
    ndbuf_t *ndbuf = self->head;
    Py_buffer *base = &ndbuf->base;
    int baseflags = ndbuf->flags;

    /* redirect mode */
    if (base->obj != NULL && (baseflags&ND_REDIRECT)) {
        return PyObject_GetBuffer(base->obj, view, flags);
    }

    /* start with complete information */
    *view = *base;
    view->obj = NULL;

    /* reconstruct format */
    if (view->format == NULL)
        view->format = "B";

    if (base->ndim != 0 &&
        ((REQ_SHAPE(flags) && base->shape == NULL) ||
         (REQ_STRIDES(flags) && base->strides == NULL))) {
        /* The ndarray is a re-exporter that has been created without full
           information for testing purposes. In this particular case the
           ndarray is not a PEP-3118 compliant buffer provider. */
        PyErr_SetString(PyExc_BufferError,
            "re-exporter does not provide format, shape or strides");
        return -1;
    }

    if (baseflags & ND_GETBUF_FAIL) {
        PyErr_SetString(PyExc_BufferError,
            "ND_GETBUF_FAIL: forced test exception");
        if (baseflags & ND_GETBUF_UNDEFINED)
            view->obj = (PyObject *)0x1; /* wrong but permitted in <= 3.2 */
        return -1;
    }

    if (REQ_WRITABLE(flags) && base->readonly) {
        PyErr_SetString(PyExc_BufferError,
            "ndarray is not writable");
        return -1;
    }
    if (!REQ_FORMAT(flags)) {
        /* NULL indicates that the buffer's data type has been cast to 'B'.
           view->itemsize is the _previous_ itemsize. If shape is present,
           the equality product(shape) * itemsize = len still holds at this
           point. The equality calcsize(format) = itemsize does _not_ hold
           from here on! */
        view->format = NULL;
    }

    if (REQ_C_CONTIGUOUS(flags) && !ND_C_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "ndarray is not C-contiguous");
        return -1;
    }
    if (REQ_F_CONTIGUOUS(flags) && !ND_FORTRAN_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "ndarray is not Fortran contiguous");
        return -1;
    }
    if (REQ_ANY_CONTIGUOUS(flags) && !ND_ANY_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "ndarray is not contiguous");
        return -1;
    }

    if (!REQ_INDIRECT(flags) && (baseflags & ND_PIL)) {
        PyErr_SetString(PyExc_BufferError,
            "ndarray cannot be represented without suboffsets");
        return -1;
    }

    if (!REQ_STRIDES(flags)) {
        if (!ND_C_CONTIGUOUS(baseflags)) {
            PyErr_SetString(PyExc_BufferError,
                "ndarray is not C-contiguous");
            return -1;
        }
        view->strides = NULL;
    }
    if (!REQ_SHAPE(flags)) {
        /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
           so base->buf = ndbuf->data. */
        if (view->format != NULL) {
            /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
               not make sense. */
            PyErr_Format(PyExc_BufferError,
                "ndarray: cannot cast to unsigned bytes if the format flag "
                "is present");
            return -1;
        }
        /* product(shape) * itemsize = len and calcsize(format) = itemsize
           do _not_ hold from here on! */
        view->ndim = 1;
        view->shape = NULL;
    }

    /* Ascertain that the new buffer has the same contiguity as the exporter */
    if (ND_C_CONTIGUOUS(baseflags) != PyBuffer_IsContiguous(view, 'C') ||
        /* skip cast to 1-d */
        (view->format != NULL && view->shape != NULL &&
         ND_FORTRAN_CONTIGUOUS(baseflags) != PyBuffer_IsContiguous(view, 'F')) ||
        /* cast to 1-d */
        (view->format == NULL && view->shape == NULL &&
         !PyBuffer_IsContiguous(view, 'F'))) {
        PyErr_SetString(PyExc_BufferError,
            "ndarray: contiguity mismatch in getbuf()");
            return -1;
    }

    view->obj = Py_NewRef(self);
    self->head->exports++;

    return 0;
}
/* bf_releasebuffer.  Consumers do nothing here (their static buffer is
   released in dealloc).  Exporters decrement the export count of the
   ndbuf recorded in view->internal; a base that is no longer the active
   head is freed once its last export goes away. */
static void
ndarray_releasebuf(PyObject *op, Py_buffer *view)
{
    NDArrayObject *self = (NDArrayObject*)op;
    if (!ND_IS_CONSUMER(self)) {
        ndbuf_t *ndbuf = view->internal;
        if (--ndbuf->exports == 0 && ndbuf != self->head)
            ndbuf_delete(self, ndbuf);
    }
}
/* Buffer protocol slots for the ndarray type. */
static PyBufferProcs ndarray_as_buffer = {
    ndarray_getbuf,      /* bf_getbuffer */
    ndarray_releasebuf,  /* bf_releasebuffer */
};
/**************************************************************************/
/* indexing/slicing */
/**************************************************************************/
/* Return a pointer to item 'index' in the first dimension of 'base',
   supporting negative (from-the-end) indexing and suboffset dereference.
   Sets IndexError and returns NULL when out of bounds.  For shape-less
   buffers, len is used as the item count (the assert ties this to the
   1-D simple-format case, where itemsize is presumably 1 — the two
   coincide only then). */
static char *
ptr_from_index(Py_buffer *base, Py_ssize_t index)
{
    char *ptr;
    Py_ssize_t nitems; /* items in the first dimension */

    if (base->shape)
        nitems = base->shape[0];
    else {
        assert(base->ndim == 1 && SIMPLE_FORMAT(base->format));
        nitems = base->len;
    }

    if (index < 0) {
        index += nitems;
    }
    if (index < 0 || index >= nitems) {
        PyErr_SetString(PyExc_IndexError, "index out of bounds");
        return NULL;
    }

    ptr = (char *)base->buf;

    if (base->strides == NULL)
         ptr += base->itemsize * index;
    else
         ptr += base->strides[0] * index;

    ptr = ADJUST_PTR(ptr, base->suboffsets);

    return ptr;
}
/* sq_item: return self[index].  For a 1-d array this unpacks a single
   element; for ndim > 1 it returns a new consumer ndarray that views the
   (ndim-1)-dimensional sub-array, sharing memory with self. */
static PyObject *
ndarray_item(PyObject *op, Py_ssize_t index)
{
    NDArrayObject *self = (NDArrayObject *)op;
    ndbuf_t *ndbuf = self->head;
    Py_buffer *base = &ndbuf->base;
    char *ptr;
    if (base->ndim == 0) {
        PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar");
        return NULL;
    }
    ptr = ptr_from_index(base, index);
    if (ptr == NULL)
        return NULL;
    if (base->ndim == 1) {
        return unpack_single(ptr, base->format, base->itemsize);
    }
    else {
        NDArrayObject *nd;
        Py_buffer *subview;
        nd = (NDArrayObject *)ndarray_new(&NDArray_Type, NULL, NULL);
        if (nd == NULL)
            return NULL;
        if (ndarray_init_staticbuf((PyObject *)self, nd, PyBUF_FULL_RO) < 0) {
            Py_DECREF(nd);
            return NULL;
        }
        /* Turn the full view into a view of the sub-array: point buf at
           the selected item and drop the first dimension from shape,
           strides and suboffsets. */
        subview = &nd->staticbuf.base;
        subview->buf = ptr;
        subview->len /= subview->shape[0];
        subview->ndim--;
        subview->shape++;
        if (subview->strides) subview->strides++;
        if (subview->suboffsets) subview->suboffsets++;
        /* Recompute contiguity flags for the reduced view. */
        init_flags(&nd->staticbuf);
        return (PyObject *)nd;
    }
}
/*
For each dimension, we get valid (start, stop, step, slicelength) quadruples
from PySlice_GetIndicesEx().
Slicing NumPy arrays
====================
A pointer to an element in a NumPy array is defined by:
ptr = (char *)buf + indices[0] * strides[0] +
... +
indices[ndim-1] * strides[ndim-1]
Adjust buf:
-----------
Adding start[n] for each dimension effectively adds the constant:
c = start[0] * strides[0] + ... + start[ndim-1] * strides[ndim-1]
Therefore init_slice() adds all start[n] directly to buf.
Adjust shape:
-------------
Obviously shape[n] = slicelength[n]
Adjust strides:
---------------
In the original array, the next element in a dimension is reached
by adding strides[n] to the pointer. In the sliced array, elements
may be skipped, so the next element is reached by adding:
strides[n] * step[n]
Slicing PIL arrays
==================
Layout:
-------
In the first (zeroth) dimension, PIL arrays have an array of pointers
to sub-arrays of ndim-1. Striding in the first dimension is done by
getting the index of the nth pointer, dereference it and then add a
suboffset to it. The arrays pointed to can best be seen a regular
NumPy arrays.
Adjust buf:
-----------
In the original array, buf points to a location (usually the start)
in the array of pointers. For the sliced array, start[0] can be
added to buf in the same manner as for NumPy arrays.
Adjust suboffsets:
------------------
Due to the dereferencing step in the addressing scheme, it is not
possible to adjust buf for higher dimensions. Recall that the
sub-arrays pointed to are regular NumPy arrays, so for each of
those arrays adding start[n] effectively adds the constant:
c = start[1] * strides[1] + ... + start[ndim-1] * strides[ndim-1]
This constant is added to suboffsets[0]. suboffsets[0] in turn is
added to each pointer right after dereferencing.
Adjust shape and strides:
-------------------------
Shape and strides are not influenced by the dereferencing step, so
they are adjusted in the same manner as for NumPy arrays.
Multiple levels of suboffsets
=============================
For a construct like an array of pointers to array of pointers to
sub-arrays of ndim-2:
suboffsets[0] = start[1] * strides[1]
suboffsets[1] = start[2] * strides[2] + ...
*/
/* Apply the slice 'key' to dimension 'dim' of 'base' in place, per the
   scheme described in the comment block above: fold start into buf (or
   into the nearest preceding non-negative suboffset for PIL-style
   arrays), set shape[dim] to the slice length, and scale strides[dim]
   by the step.  Returns 0 on success, -1 with an exception set. */
static int
init_slice(Py_buffer *base, PyObject *key, int dim)
{
    Py_ssize_t start, stop, step, slicelength;
    if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
        return -1;
    }
    slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
    if (base->suboffsets == NULL || dim == 0) {
    adjust_buf:
        base->buf = (char *)base->buf + base->strides[dim] * start;
    }
    else {
        /* Find the closest dimension at or before dim-1 that actually
           dereferences a pointer; the start offset must be added after
           that dereference, i.e. to its suboffset. */
        Py_ssize_t n = dim-1;
        while (n >= 0 && base->suboffsets[n] < 0)
            n--;
        if (n < 0)
            goto adjust_buf; /* all suboffsets are negative */
        base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
    }
    base->shape[dim] = slicelength;
    base->strides[dim] = base->strides[dim] * step;
    return 0;
}
/* Replace base's shape, strides and (optional) suboffsets pointers with
   freshly malloc'd copies, so a sliced view can modify them without
   touching the exporter's arrays.  The caller becomes responsible for
   freeing them (ND_OWN_ARRAYS).  Returns -1 with MemoryError set on
   allocation failure; base is left unmodified in that case. */
static int
copy_structure(Py_buffer *base)
{
    Py_ssize_t *shape = NULL, *strides = NULL, *suboffsets = NULL;
    Py_ssize_t i;
    shape = PyMem_Malloc(base->ndim * (sizeof *shape));
    strides = PyMem_Malloc(base->ndim * (sizeof *strides));
    if (shape == NULL || strides == NULL)
        goto err_nomem;
    suboffsets = NULL;
    if (base->suboffsets) {
        suboffsets = PyMem_Malloc(base->ndim * (sizeof *suboffsets));
        if (suboffsets == NULL)
            goto err_nomem;
    }
    for (i = 0; i < base->ndim; i++) {
        shape[i] = base->shape[i];
        strides[i] = base->strides[i];
        if (suboffsets)
            suboffsets[i] = base->suboffsets[i];
    }
    base->shape = shape;
    base->strides = strides;
    base->suboffsets = suboffsets;
    return 0;
err_nomem:
    PyErr_NoMemory();
    PyMem_XFree(shape);
    PyMem_XFree(strides);
    PyMem_XFree(suboffsets);
    return -1;
}
/* mp_subscript: self[key].  Supports scalar indexing (() or Ellipsis on
   0-d arrays), integer indexing (delegates to ndarray_item), and slice /
   tuple-of-slices subscripts, which return a new consumer ndarray whose
   shape/strides/suboffsets arrays are private copies adjusted in place
   by init_slice(). */
static PyObject *
ndarray_subscript(PyObject *op, PyObject *key)
{
    NDArrayObject *self = (NDArrayObject*)op;
    NDArrayObject *nd;
    ndbuf_t *ndbuf;
    Py_buffer *base = &self->head->base;
    if (base->ndim == 0) {
        if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
            return unpack_single(base->buf, base->format, base->itemsize);
        }
        else if (key == Py_Ellipsis) {
            return Py_NewRef(self);
        }
        else {
            PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar");
            return NULL;
        }
    }
    if (PyIndex_Check(key)) {
        Py_ssize_t index = PyLong_AsSsize_t(key);
        if (index == -1 && PyErr_Occurred())
            return NULL;
        return ndarray_item(op, index);
    }
    nd = (NDArrayObject *)ndarray_new(&NDArray_Type, NULL, NULL);
    if (nd == NULL)
        return NULL;
    /* new ndarray is a consumer */
    if (ndarray_init_staticbuf((PyObject *)self, nd, PyBUF_FULL_RO) < 0) {
        Py_DECREF(nd);
        return NULL;
    }
    /* copy shape, strides and suboffsets */
    ndbuf = nd->head;
    base = &ndbuf->base;
    if (copy_structure(base) < 0) {
        Py_DECREF(nd);
        return NULL;
    }
    ndbuf->flags |= ND_OWN_ARRAYS;
    if (PySlice_Check(key)) {
        /* one-dimensional slice */
        if (init_slice(base, key, 0) < 0)
            goto err_occurred;
    }
    else if (PyTuple_Check(key)) {
        /* multi-dimensional slice */
        PyObject *tuple = key;
        Py_ssize_t i, n;
        n = PyTuple_GET_SIZE(tuple);
        for (i = 0; i < n; i++) {
            key = PyTuple_GET_ITEM(tuple, i);
            if (!PySlice_Check(key))
                goto type_error;
            if (init_slice(base, key, (int)i) < 0)
                goto err_occurred;
        }
    }
    else {
        goto type_error;
    }
    /* Recompute len and contiguity flags for the sliced view. */
    init_len(base);
    init_flags(ndbuf);
    return (PyObject *)nd;
type_error:
    PyErr_Format(PyExc_TypeError,
        "cannot index memory using \"%T\"", key);
    /* type_error deliberately falls through to the common cleanup. */
err_occurred:
    Py_DECREF(nd);
    return NULL;
}
/* mp_ass_subscript: self[key] = value.  Scalar and single-item stores
   pack the value directly; slice assignment requires 'value' to export
   a buffer and copies it into the sliced view obtained from
   ndarray_subscript().  Deletion is rejected.  Returns 0/-1. */
static int
ndarray_ass_subscript(PyObject *op, PyObject *key, PyObject *value)
{
    NDArrayObject *self = (NDArrayObject*)op;
    NDArrayObject *nd;
    Py_buffer *dest = &self->head->base;
    Py_buffer src;
    char *ptr;
    Py_ssize_t index;
    int ret = -1;
    if (dest->readonly) {
        PyErr_SetString(PyExc_TypeError, "ndarray is not writable");
        return -1;
    }
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError, "ndarray data cannot be deleted");
        return -1;
    }
    if (dest->ndim == 0) {
        /* 0-d array: only x[()] = v or x[...] = v are meaningful. */
        if (key == Py_Ellipsis ||
            (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0)) {
            ptr = (char *)dest->buf;
            return pack_single(ptr, value, dest->format, dest->itemsize);
        }
        else {
            PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar");
            return -1;
        }
    }
    if (dest->ndim == 1 && PyIndex_Check(key)) {
        /* rvalue must be a single item */
        index = PyLong_AsSsize_t(key);
        if (index == -1 && PyErr_Occurred())
            return -1;
        else {
            ptr = ptr_from_index(dest, index);
            if (ptr == NULL)
                return -1;
        }
        return pack_single(ptr, value, dest->format, dest->itemsize);
    }
    /* rvalue must be an exporter */
    if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) == -1)
        return -1;
    nd = (NDArrayObject *)ndarray_subscript((PyObject*)self, key);
    if (nd != NULL) {
        dest = &nd->head->base;
        ret = copy_buffer(dest, &src);
        Py_DECREF(nd);
    }
    PyBuffer_Release(&src);
    return ret;
}
/* slice_indices(slice, len) -> (start, stop, step, slicelength).
   Exposes PySlice_Unpack()/PySlice_AdjustIndices() to Python for
   testing against slice.indices(). */
static PyObject *
slice_indices(PyObject *self, PyObject *args)
{
    PyObject *ret, *key, *tmp;
    Py_ssize_t s[4]; /* start, stop, step, slicelength */
    Py_ssize_t i, len;
    if (!PyArg_ParseTuple(args, "On", &key, &len)) {
        return NULL;
    }
    if (!PySlice_Check(key)) {
        PyErr_SetString(PyExc_TypeError,
            "first argument must be a slice object");
        return NULL;
    }
    if (PySlice_Unpack(key, &s[0], &s[1], &s[2]) < 0) {
        return NULL;
    }
    s[3] = PySlice_AdjustIndices(len, &s[0], &s[1], s[2]);
    ret = PyTuple_New(4);
    if (ret == NULL)
        return NULL;
    for (i = 0; i < 4; i++) {
        tmp = PyLong_FromSsize_t(s[i]);
        if (tmp == NULL)
            goto error;
        PyTuple_SET_ITEM(ret, i, tmp);
    }
    return ret;
error:
    Py_DECREF(ret);
    return NULL;
}
/* Mapping protocol: subscript/store; no length. */
static PyMappingMethods ndarray_as_mapping = {
    NULL,                                 /* mp_length */
    ndarray_subscript,                    /* mp_subscript */
    ndarray_ass_subscript                 /* mp_ass_subscript */
};
/* Sequence protocol: only item access is supported. */
static PySequenceMethods ndarray_as_sequence = {
        0,                                /* sq_length */
        0,                                /* sq_concat */
        0,                                /* sq_repeat */
        ndarray_item,                     /* sq_item */
};
/**************************************************************************/
/* getters */
/**************************************************************************/
/* Convert a C array of 'len' Py_ssize_t values into a Python tuple.
   A NULL array maps to the empty tuple.  Returns a new reference or
   NULL with an exception set. */
static PyObject *
ssize_array_as_tuple(Py_ssize_t *array, Py_ssize_t len)
{
    if (array == NULL)
        return PyTuple_New(0);

    PyObject *result = PyTuple_New(len);
    if (result == NULL)
        return NULL;

    for (Py_ssize_t k = 0; k < len; k++) {
        PyObject *item = PyLong_FromSsize_t(array[k]);
        if (item == NULL) {
            Py_DECREF(result);
            return NULL;
        }
        PyTuple_SET_ITEM(result, k, item);
    }
    return result;
}
/* Getter for ndarray.flags: the ndbuf ND_* flag word. */
static PyObject *
ndarray_get_flags(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    return PyLong_FromLong(self->head->flags);
}
/* Getter for ndarray.offset: offset of buf within the raw data. */
static PyObject *
ndarray_get_offset(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    ndbuf_t *ndbuf = self->head;
    return PyLong_FromSsize_t(ndbuf->offset);
}
/* Getter for ndarray.obj: the underlying exporter, or None. */
static PyObject *
ndarray_get_obj(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    if (base->obj == NULL) {
        Py_RETURN_NONE;
    }
    return Py_NewRef(base->obj);
}
/* Getter for ndarray.nbytes: total buffer length in bytes. */
static PyObject *
ndarray_get_nbytes(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    return PyLong_FromSsize_t(base->len);
}
/* Getter for ndarray.readonly: True if the buffer is read-only. */
static PyObject *
ndarray_get_readonly(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    return PyBool_FromLong(base->readonly);
}
/* Getter for ndarray.itemsize: size of one element in bytes. */
static PyObject *
ndarray_get_itemsize(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    return PyLong_FromSsize_t(base->itemsize);
}
/* Getter for ndarray.format: struct-style format string; a NULL
   format is reported as the empty string. */
static PyObject *
ndarray_get_format(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    const char *fmt = base->format ? base->format : "";
    return PyUnicode_FromString(fmt);
}
/* Getter for ndarray.ndim: number of dimensions. */
static PyObject *
ndarray_get_ndim(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    return PyLong_FromSsize_t(base->ndim);
}
/* Getter for ndarray.shape as a tuple (empty if shape is NULL). */
static PyObject *
ndarray_get_shape(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    return ssize_array_as_tuple(base->shape, base->ndim);
}
/* Getter for ndarray.strides as a tuple (empty if strides is NULL). */
static PyObject *
ndarray_get_strides(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    return ssize_array_as_tuple(base->strides, base->ndim);
}
/* Getter for ndarray.suboffsets as a tuple (empty if NULL). */
static PyObject *
ndarray_get_suboffsets(PyObject *op, void *closure)
{
    NDArrayObject *self = (NDArrayObject*)op;
    Py_buffer *base = &self->head->base;
    return ssize_array_as_tuple(base->suboffsets, base->ndim);
}
/* Getter for ndarray.c_contiguous.  Cross-checks PyBuffer_IsContiguous()
   against the cached ND_C flag and raises RuntimeError on disagreement
   (this object exists to test the buffer API). */
static PyObject *
ndarray_c_contig(PyObject *self, void *dummy)
{
    NDArrayObject *nd = (NDArrayObject *)self;
    int ret = PyBuffer_IsContiguous(&nd->head->base, 'C');
    if (ret != ND_C_CONTIGUOUS(nd->head->flags)) {
        PyErr_SetString(PyExc_RuntimeError,
            "results from PyBuffer_IsContiguous() and flags differ");
        return NULL;
    }
    return PyBool_FromLong(ret);
}
/* Getter for ndarray.f_contiguous; same cross-check as c_contiguous
   but for Fortran order. */
static PyObject *
ndarray_fortran_contig(PyObject *self, void *dummy)
{
    NDArrayObject *nd = (NDArrayObject *)self;
    int ret = PyBuffer_IsContiguous(&nd->head->base, 'F');
    if (ret != ND_FORTRAN_CONTIGUOUS(nd->head->flags)) {
        PyErr_SetString(PyExc_RuntimeError,
            "results from PyBuffer_IsContiguous() and flags differ");
        return NULL;
    }
    return PyBool_FromLong(ret);
}
/* Getter for ndarray.contiguous; same cross-check for 'A'ny order. */
static PyObject *
ndarray_contig(PyObject *self, void *dummy)
{
    NDArrayObject *nd = (NDArrayObject *)self;
    int ret = PyBuffer_IsContiguous(&nd->head->base, 'A');
    if (ret != ND_ANY_CONTIGUOUS(nd->head->flags)) {
        PyErr_SetString(PyExc_RuntimeError,
            "results from PyBuffer_IsContiguous() and flags differ");
        return NULL;
    }
    return PyBool_FromLong(ret);
}
/* Read-only attributes exposing the internal ndbuf and Py_buffer state. */
static PyGetSetDef ndarray_getset [] =
{
  /* ndbuf */
  { "flags",        ndarray_get_flags,      NULL, NULL, NULL},
  { "offset",       ndarray_get_offset,     NULL, NULL, NULL},
  /* ndbuf.base */
  { "obj",          ndarray_get_obj,        NULL, NULL, NULL},
  { "nbytes",       ndarray_get_nbytes,     NULL, NULL, NULL},
  { "readonly",     ndarray_get_readonly,   NULL, NULL, NULL},
  { "itemsize",     ndarray_get_itemsize,   NULL, NULL, NULL},
  { "format",       ndarray_get_format,     NULL, NULL, NULL},
  { "ndim",         ndarray_get_ndim,       NULL, NULL, NULL},
  { "shape",        ndarray_get_shape,      NULL, NULL, NULL},
  { "strides",      ndarray_get_strides,    NULL, NULL, NULL},
  { "suboffsets",   ndarray_get_suboffsets, NULL, NULL, NULL},
  { "c_contiguous", ndarray_c_contig,       NULL, NULL, NULL},
  { "f_contiguous", ndarray_fortran_contig, NULL, NULL, NULL},
  { "contiguous",   ndarray_contig,         NULL, NULL, NULL},
  {NULL}
};
/* ndarray.tolist(): nested list representation of the array. */
static PyObject *
ndarray_tolist(PyObject *self, PyObject *dummy)
{
    return ndarray_as_list((NDArrayObject *)self);
}
/* ndarray.tobytes(): the array contents as bytes in C order.  A
   C-contiguous buffer is copied directly; otherwise the data is first
   repacked into a temporary C-contiguous buffer via copy_buffer(). */
static PyObject *
ndarray_tobytes(PyObject *self, PyObject *dummy)
{
    ndbuf_t *ndbuf = ((NDArrayObject *)self)->head;
    Py_buffer *src = &ndbuf->base;
    Py_buffer dest;
    PyObject *ret = NULL;
    char *mem;
    if (ND_C_CONTIGUOUS(ndbuf->flags))
        return PyBytes_FromStringAndSize(src->buf, src->len);
    assert(src->shape != NULL);
    assert(src->strides != NULL);
    assert(src->ndim > 0);
    mem = PyMem_Malloc(src->len);
    if (mem == NULL) {
        PyErr_NoMemory();
        return NULL;
    }
    /* Build a C-contiguous destination view over the scratch buffer. */
    dest = *src;
    dest.buf = mem;
    dest.suboffsets = NULL;
    dest.strides = strides_from_shape(ndbuf, 0);
    if (dest.strides == NULL)
        goto out;
    if (copy_buffer(&dest, src) < 0)
        goto out;
    ret = PyBytes_FromStringAndSize(mem, src->len);
out:
    PyMem_XFree(dest.strides);
    PyMem_Free(mem);
    return ret;
}
/* add redundant (negative) suboffsets for testing */
/* ndarray.add_suboffsets(): attach an all -1 (i.e. unused) suboffsets
   array to a strided NumPy-style buffer.  This does not change
   addressing, but forces consumers down the suboffset code paths and
   clears the cached C/Fortran contiguity flags. */
static PyObject *
ndarray_add_suboffsets(PyObject *self, PyObject *dummy)
{
    NDArrayObject *nd = (NDArrayObject *)self;
    Py_buffer *base = &nd->head->base;
    Py_ssize_t i;
    if (base->suboffsets != NULL) {
        PyErr_SetString(PyExc_TypeError,
            "cannot add suboffsets to PIL-style array");
        return NULL;
    }
    if (base->strides == NULL) {
        PyErr_SetString(PyExc_TypeError,
            "cannot add suboffsets to array without strides");
        return NULL;
    }
    base->suboffsets = PyMem_Malloc(base->ndim * (sizeof *base->suboffsets));
    if (base->suboffsets == NULL) {
        PyErr_NoMemory();
        return NULL;
    }
    for (i = 0; i < base->ndim; i++)
        base->suboffsets[i] = -1;
    /* A buffer with suboffsets is no longer advertised as contiguous. */
    nd->head->flags &= ~(ND_C|ND_FORTRAN);
    Py_RETURN_NONE;
}
/* Test PyMemoryView_FromBuffer(): return a memoryview from a static buffer.
Obviously this is fragile and only one such view may be active at any
time. Never use anything like this in real code! */
/* Heap copy of the exporter's raw data for the static Py_buffer below. */
static char *infobuf = NULL;
/* ndarray.memoryview_from_buffer(): exercise PyMemoryView_FromBuffer()
   by snapshotting this array's view into static storage (format, shape,
   strides, suboffsets, data) and handing that to the C API.  Only one
   such view can be live at a time -- see the warning comment above. */
static PyObject *
ndarray_memoryview_from_buffer(PyObject *self, PyObject *dummy)
{
    const NDArrayObject *nd = (NDArrayObject *)self;
    const Py_buffer *view = &nd->head->base;
    const ndbuf_t *ndbuf;
    static char format[ND_MAX_NDIM+1];
    static Py_ssize_t shape[ND_MAX_NDIM];
    static Py_ssize_t strides[ND_MAX_NDIM];
    static Py_ssize_t suboffsets[ND_MAX_NDIM];
    static Py_buffer info;
    char *p;
    /* Locate the ndbuf that owns the raw data: either self directly or,
       for a consumer, the original ndarray exporter behind view->obj. */
    if (!ND_IS_CONSUMER(nd))
        ndbuf = nd->head; /* self is ndarray/original exporter */
    else if (NDArray_Check(view->obj) && !ND_IS_CONSUMER(view->obj))
        /* self is ndarray and consumer from ndarray/original exporter */
        ndbuf = ((NDArrayObject *)view->obj)->head;
    else {
        PyErr_SetString(PyExc_TypeError,
        "memoryview_from_buffer(): ndarray must be original exporter or "
        "consumer from ndarray/original exporter");
        return NULL;
    }
    info = *view;
    p = PyMem_Realloc(infobuf, ndbuf->len);
    if (p == NULL) {
        PyMem_Free(infobuf);
        PyErr_NoMemory();
        infobuf = NULL;
        return NULL;
    }
    else {
        infobuf = p;
    }
    /* copy the complete raw data */
    memcpy(infobuf, ndbuf->data, ndbuf->len);
    /* Keep buf pointing at the same offset within the copied data. */
    info.buf = infobuf + ((char *)view->buf - ndbuf->data);
    if (view->format) {
        if (strlen(view->format) > ND_MAX_NDIM) {
            PyErr_Format(PyExc_TypeError,
                "memoryview_from_buffer: format is limited to %d characters",
                ND_MAX_NDIM);
            return NULL;
        }
        strcpy(format, view->format);
        info.format = format;
    }
    if (view->ndim > ND_MAX_NDIM) {
        PyErr_Format(PyExc_TypeError,
            "memoryview_from_buffer: ndim is limited to %d", ND_MAX_NDIM);
        return NULL;
    }
    if (view->shape) {
        memcpy(shape, view->shape, view->ndim * sizeof(Py_ssize_t));
        info.shape = shape;
    }
    if (view->strides) {
        memcpy(strides, view->strides, view->ndim * sizeof(Py_ssize_t));
        info.strides = strides;
    }
    if (view->suboffsets) {
        memcpy(suboffsets, view->suboffsets, view->ndim * sizeof(Py_ssize_t));
        info.suboffsets = suboffsets;
    }
    return PyMemoryView_FromBuffer(&info);
}
/* Get a single item from bufobj at the location specified by seq.
seq is a list or tuple of indices. The purpose of this function
is to check other functions against PyBuffer_GetPointer(). */
/* get_pointer(bufobj, indices): fetch the single item addressed by the
   index sequence using PyBuffer_GetPointer(), then unpack it.  Used to
   cross-check other addressing functions against the C API. */
static PyObject *
get_pointer(PyObject *self, PyObject *args)
{
    PyObject *ret = NULL, *bufobj, *seq;
    Py_buffer view;
    Py_ssize_t indices[ND_MAX_NDIM];
    Py_ssize_t i;
    void *ptr;
    if (!PyArg_ParseTuple(args, "OO", &bufobj, &seq)) {
        return NULL;
    }
    CHECK_LIST_OR_TUPLE(seq);
    if (PyObject_GetBuffer(bufobj, &view, PyBUF_FULL_RO) < 0)
        return NULL;
    if (view.ndim > ND_MAX_NDIM) {
        PyErr_Format(PyExc_ValueError,
            "get_pointer(): ndim > %d", ND_MAX_NDIM);
        goto out;
    }
    if (PySequence_Fast_GET_SIZE(seq) != view.ndim) {
        PyErr_SetString(PyExc_ValueError,
            "get_pointer(): len(indices) != ndim");
        goto out;
    }
    /* Validate each index against the corresponding dimension;
       negative indices are not accepted here. */
    for (i = 0; i < view.ndim; i++) {
        PyObject *x = PySequence_Fast_GET_ITEM(seq, i);
        indices[i] = PyLong_AsSsize_t(x);
        if (PyErr_Occurred())
            goto out;
        if (indices[i] < 0 || indices[i] >= view.shape[i]) {
            PyErr_Format(PyExc_ValueError,
                "get_pointer(): invalid index %zd at position %zd",
                indices[i], i);
            goto out;
        }
    }
    ptr = PyBuffer_GetPointer(&view, indices);
    ret = unpack_single(ptr, view.format, view.itemsize);
out:
    PyBuffer_Release(&view);
    return ret;
}
/* get_sizeof_void_p(): sizeof(void *) on this platform. */
static PyObject *
get_sizeof_void_p(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    return PyLong_FromSize_t(sizeof(void *));
}
/* Validate an order argument and return its first character, which
   must be one of 'C', 'F' or 'A'.  Returns CHAR_MAX with an exception
   set on any error. */
static char
get_ascii_order(PyObject *order)
{
    PyObject *ascii_order;
    char ord;
    if (!PyUnicode_Check(order)) {
        PyErr_SetString(PyExc_TypeError,
            "order must be a string");
        return CHAR_MAX;
    }
    ascii_order = PyUnicode_AsASCIIString(order);
    if (ascii_order == NULL) {
        return CHAR_MAX;
    }
    ord = PyBytes_AS_STRING(ascii_order)[0];
    Py_DECREF(ascii_order);
    if (ord != 'C' && ord != 'F' && ord != 'A') {
        PyErr_SetString(PyExc_ValueError,
            "invalid order, must be C, F or A");
        return CHAR_MAX;
    }
    return ord;
}
/* Get a contiguous memoryview. */
/* get_contiguous(obj, buffertype, order): wrapper around
   PyMemoryView_GetContiguous().  buffertype must be PyBUF_READ or
   PyBUF_WRITE; order must be 'C', 'F' or 'A'. */
static PyObject *
get_contiguous(PyObject *self, PyObject *args)
{
    PyObject *obj;
    PyObject *buffertype;
    PyObject *order;
    long type;
    char ord;
    if (!PyArg_ParseTuple(args, "OOO", &obj, &buffertype, &order)) {
        return NULL;
    }
    if (!PyLong_Check(buffertype)) {
        PyErr_SetString(PyExc_TypeError,
            "buffertype must be PyBUF_READ or PyBUF_WRITE");
        return NULL;
    }
    type = PyLong_AsLong(buffertype);
    if (type == -1 && PyErr_Occurred()) {
        return NULL;
    }
    if (type != PyBUF_READ && type != PyBUF_WRITE) {
        PyErr_SetString(PyExc_ValueError,
            "invalid buffer type");
        return NULL;
    }
    ord = get_ascii_order(order);
    if (ord == CHAR_MAX)
        return NULL;
    return PyMemoryView_GetContiguous(obj, (int)type, ord);
}
/* PyBuffer_ToContiguous() */
/* py_buffer_to_contiguous(obj, order, flags): acquire obj's buffer with
   the given flags and return its contents as bytes in the requested
   ('C', 'F' or 'A') order, via PyBuffer_ToContiguous(). */
static PyObject *
py_buffer_to_contiguous(PyObject *self, PyObject *args)
{
    PyObject *obj;
    PyObject *order;
    PyObject *ret = NULL;
    int flags;
    char ord;
    Py_buffer view;
    char *buf = NULL;
    if (!PyArg_ParseTuple(args, "OOi", &obj, &order, &flags)) {
        return NULL;
    }
    if (PyObject_GetBuffer(obj, &view, flags) < 0) {
        return NULL;
    }
    ord = get_ascii_order(order);
    if (ord == CHAR_MAX) {
        goto out;
    }
    buf = PyMem_Malloc(view.len);
    if (buf == NULL) {
        PyErr_NoMemory();
        goto out;
    }
    if (PyBuffer_ToContiguous(buf, &view, view.len, ord) < 0) {
        goto out;
    }
    ret = PyBytes_FromStringAndSize(buf, view.len);
out:
    PyBuffer_Release(&view);
    PyMem_XFree(buf);
    return ret;
}
/* Compare two buffer format strings for logical equality.  Per the
   buffer protocol, a NULL format is equivalent to "B" (unsigned
   bytes).  Returns 1 if equal, 0 otherwise. */
static int
fmtcmp(const char *fmt1, const char *fmt2)
{
    if (fmt1 == NULL) {
        return fmt2 == NULL || strcmp(fmt2, "B") == 0;
    }
    if (fmt2 == NULL) {
        /* fmt1 is known non-NULL here, so the original's
           'fmt1 == NULL ||' test was dead code. */
        return strcmp(fmt1, "B") == 0;
    }
    return strcmp(fmt1, fmt2) == 0;
}
/* Element-wise comparison of two Py_ssize_t arrays of length ndim.
   When a shape array is supplied, dimensions of extent <= 1 are
   ignored, since stride values are meaningless there.  Returns 1 if
   the arrays are considered equal, 0 otherwise. */
static int
arraycmp(const Py_ssize_t *a1, const Py_ssize_t *a2, const Py_ssize_t *shape,
         Py_ssize_t ndim)
{
    Py_ssize_t k;
    for (k = 0; k < ndim; k++) {
        int ignorable = (shape != NULL && shape[k] <= 1);
        if (!ignorable && a1[k] != a2[k])
            return 0;
    }
    return 1;
}
/* Compare two contiguous buffers for physical equality. */
/* cmp_contig(b1, b2): True if both objects export contiguous buffers of
   the same order that agree in metadata (len, itemsize, ndim, format,
   shape/strides/suboffsets) and in raw memory contents. */
static PyObject *
cmp_contig(PyObject *self, PyObject *args)
{
    PyObject *b1, *b2; /* buffer objects */
    Py_buffer v1, v2;
    PyObject *ret;
    int equal = 0;
    if (!PyArg_ParseTuple(args, "OO", &b1, &b2)) {
        return NULL;
    }
    if (PyObject_GetBuffer(b1, &v1, PyBUF_FULL_RO) < 0) {
        PyErr_SetString(PyExc_TypeError,
            "cmp_contig: first argument does not implement the buffer "
            "protocol");
        return NULL;
    }
    if (PyObject_GetBuffer(b2, &v2, PyBUF_FULL_RO) < 0) {
        PyErr_SetString(PyExc_TypeError,
            "cmp_contig: second argument does not implement the buffer "
            "protocol");
        PyBuffer_Release(&v1);
        return NULL;
    }
    /* Both views must share a contiguity order (C or Fortran). */
    if (!(PyBuffer_IsContiguous(&v1, 'C')&&PyBuffer_IsContiguous(&v2, 'C')) &&
        !(PyBuffer_IsContiguous(&v1, 'F')&&PyBuffer_IsContiguous(&v2, 'F'))) {
        goto result;
    }
    /* readonly may differ if created from non-contiguous */
    if (v1.len != v2.len ||
        v1.itemsize != v2.itemsize ||
        v1.ndim != v2.ndim ||
        !fmtcmp(v1.format, v2.format) ||
        !!v1.shape != !!v2.shape ||
        !!v1.strides != !!v2.strides ||
        !!v1.suboffsets != !!v2.suboffsets) {
        goto result;
    }
    /* Stride comparison passes v1.shape so length-<=1 dims are ignored. */
    if ((v1.shape && !arraycmp(v1.shape, v2.shape, NULL, v1.ndim)) ||
        (v1.strides && !arraycmp(v1.strides, v2.strides, v1.shape, v1.ndim)) ||
        (v1.suboffsets && !arraycmp(v1.suboffsets, v2.suboffsets, NULL,
                                    v1.ndim))) {
        goto result;
    }
    if (memcmp((char *)v1.buf, (char *)v2.buf, v1.len) != 0) {
        goto result;
    }
    equal = 1;
result:
    PyBuffer_Release(&v1);
    PyBuffer_Release(&v2);
    ret = equal ? Py_True : Py_False;
    return Py_NewRef(ret);
}
/* is_contiguous(obj, order): PyBuffer_IsContiguous() for the given
   order.  ndarray objects are inspected directly (bypassing getbuf) so
   simple/scalar buffers can also be checked. */
static PyObject *
is_contiguous(PyObject *self, PyObject *args)
{
    PyObject *obj;
    PyObject *order;
    PyObject *ret = NULL;
    Py_buffer view, *base;
    char ord;
    if (!PyArg_ParseTuple(args, "OO", &obj, &order)) {
        return NULL;
    }
    ord = get_ascii_order(order);
    if (ord == CHAR_MAX) {
        return NULL;
    }
    if (NDArray_Check(obj)) {
        /* Skip the buffer protocol to check simple etc. buffers directly. */
        base = &((NDArrayObject *)obj)->head->base;
        ret = PyBuffer_IsContiguous(base, ord) ? Py_True : Py_False;
    }
    else {
        if (PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO) < 0) {
            PyErr_SetString(PyExc_TypeError,
                "is_contiguous: object does not implement the buffer "
                "protocol");
            return NULL;
        }
        ret = PyBuffer_IsContiguous(&view, ord) ? Py_True : Py_False;
        PyBuffer_Release(&view);
    }
    return Py_NewRef(ret);
}
/* tp_hash: hash of the byte representation (C order).  Mirrors
   memoryview semantics: writable arrays are unhashable, and the
   underlying exporter must itself be hashable. */
static Py_hash_t
ndarray_hash(PyObject *self)
{
    const NDArrayObject *nd = (NDArrayObject *)self;
    const Py_buffer *view = &nd->head->base;
    PyObject *bytes;
    Py_hash_t hash;
    if (!view->readonly) {
        PyErr_SetString(PyExc_ValueError,
            "cannot hash writable ndarray object");
        return -1;
    }
    /* Hashing the exporter only to require that it is hashable;
       the result is discarded. */
    if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
        return -1;
    }
    bytes = ndarray_tobytes(self, NULL);
    if (bytes == NULL) {
        return -1;
    }
    hash = PyObject_Hash(bytes);
    Py_DECREF(bytes);
    return hash;
}
/* Methods of the ndarray test object. */
static PyMethodDef ndarray_methods [] =
{
    { "tolist", ndarray_tolist, METH_NOARGS, NULL },
    { "tobytes", ndarray_tobytes, METH_NOARGS, NULL },
    { "push", _PyCFunction_CAST(ndarray_push), METH_VARARGS|METH_KEYWORDS, NULL },
    { "pop", ndarray_pop, METH_NOARGS, NULL },
    { "add_suboffsets", ndarray_add_suboffsets, METH_NOARGS, NULL },
    { "memoryview_from_buffer", ndarray_memoryview_from_buffer, METH_NOARGS, NULL },
    {NULL}
};
/* Type object for the ndarray buffer-protocol test class. */
static PyTypeObject NDArray_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "ndarray",                   /* Name of this type */
    sizeof(NDArrayObject),       /* Basic object size */
    0,                           /* Item size for varobject */
    ndarray_dealloc,             /* tp_dealloc */
    0,                           /* tp_vectorcall_offset */
    0,                           /* tp_getattr */
    0,                           /* tp_setattr */
    0,                           /* tp_as_async */
    0,                           /* tp_repr */
    0,                           /* tp_as_number */
    &ndarray_as_sequence,        /* tp_as_sequence */
    &ndarray_as_mapping,         /* tp_as_mapping */
    ndarray_hash,                /* tp_hash */
    0,                           /* tp_call */
    0,                           /* tp_str */
    PyObject_GenericGetAttr,     /* tp_getattro */
    0,                           /* tp_setattro */
    &ndarray_as_buffer,          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,          /* tp_flags */
    0,                           /* tp_doc */
    0,                           /* tp_traverse */
    0,                           /* tp_clear */
    0,                           /* tp_richcompare */
    0,                           /* tp_weaklistoffset */
    0,                           /* tp_iter */
    0,                           /* tp_iternext */
    ndarray_methods,             /* tp_methods */
    0,                           /* tp_members */
    ndarray_getset,              /* tp_getset */
    0,                           /* tp_base */
    0,                           /* tp_dict */
    0,                           /* tp_descr_get */
    0,                           /* tp_descr_set */
    0,                           /* tp_dictoffset */
    ndarray_init,                /* tp_init */
    0,                           /* tp_alloc */
    ndarray_new,                 /* tp_new */
};
/**************************************************************************/
/* StaticArray Object */
/**************************************************************************/
static PyTypeObject StaticArray_Type;
/* Minimal exporter over a fixed static buffer; used to test legacy
   (view.obj == NULL) export semantics. */
typedef struct {
    PyObject_HEAD
    int legacy_mode; /* if true, use the view.obj==NULL hack */
} StaticArrayObject;
/* The single, process-wide buffer exported by staticarray: 12 read-only
   unsigned bytes 0..11, 1-d, C-contiguous. */
static char static_mem[12] = {0,1,2,3,4,5,6,7,8,9,10,11};
static Py_ssize_t static_shape[1] = {12};
static Py_ssize_t static_strides[1] = {1};
static Py_buffer static_buffer = {
    static_mem,     /* buf */
    NULL,           /* obj */
    12,             /* len */
    1,              /* itemsize */
    1,              /* readonly */
    1,              /* ndim */
    "B",            /* format */
    static_shape,   /* shape */
    static_strides, /* strides */
    NULL,           /* suboffsets */
    NULL            /* internal */
};
/* tp_new: allocate a staticarray; args/kwds are ignored here and
   handled by staticarray_init. */
static PyObject *
staticarray_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    return (PyObject *)PyObject_New(StaticArrayObject, &StaticArray_Type);
}
/* tp_init: staticarray(legacy_mode=False).  Any value other than the
   False singleton enables legacy mode (view.obj == NULL exports). */
static int
staticarray_init(PyObject *self, PyObject *args, PyObject *kwds)
{
    StaticArrayObject *a = (StaticArrayObject *)self;
    static char *kwlist[] = {
        "legacy_mode", NULL
    };
    PyObject *legacy_mode = Py_False;
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O", kwlist, &legacy_mode))
        return -1;
    a->legacy_mode = (legacy_mode != Py_False);
    return 0;
}
/* tp_dealloc: no owned resources beyond the object memory itself. */
static void
staticarray_dealloc(PyObject *self)
{
    PyObject_Free(self);
}
/* Return a buffer for a PyBUF_FULL_RO request. Flags are not checked,
   which makes this object a non-compliant exporter! */
static int
staticarray_getbuf(PyObject *op, Py_buffer *view, int flags)
{
    StaticArrayObject *self = (StaticArrayObject *)op;
    *view = static_buffer;
    if (self->legacy_mode) {
        view->obj = NULL; /* Don't use this in new code. */
    }
    else {
        /* Compliant path: the view owns a reference to the exporter. */
        view->obj = Py_NewRef(self);
    }
    return 0;
}
/* Buffer slots: export only; nothing to release. */
static PyBufferProcs staticarray_as_buffer = {
    staticarray_getbuf,          /* bf_getbuffer */
    NULL,                        /* bf_releasebuffer */
};
/* Type object for the staticarray test class. */
static PyTypeObject StaticArray_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "staticarray",               /* Name of this type */
    sizeof(StaticArrayObject),   /* Basic object size */
    0,                           /* Item size for varobject */
    staticarray_dealloc,         /* tp_dealloc */
    0,                           /* tp_vectorcall_offset */
    0,                           /* tp_getattr */
    0,                           /* tp_setattr */
    0,                           /* tp_as_async */
    0,                           /* tp_repr */
    0,                           /* tp_as_number */
    0,                           /* tp_as_sequence */
    0,                           /* tp_as_mapping */
    0,                           /* tp_hash */
    0,                           /* tp_call */
    0,                           /* tp_str */
    0,                           /* tp_getattro */
    0,                           /* tp_setattro */
    &staticarray_as_buffer,      /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,          /* tp_flags */
    0,                           /* tp_doc */
    0,                           /* tp_traverse */
    0,                           /* tp_clear */
    0,                           /* tp_richcompare */
    0,                           /* tp_weaklistoffset */
    0,                           /* tp_iter */
    0,                           /* tp_iternext */
    0,                           /* tp_methods */
    0,                           /* tp_members */
    0,                           /* tp_getset */
    0,                           /* tp_base */
    0,                           /* tp_dict */
    0,                           /* tp_descr_get */
    0,                           /* tp_descr_set */
    0,                           /* tp_dictoffset */
    staticarray_init,            /* tp_init */
    0,                           /* tp_alloc */
    staticarray_new,             /* tp_new */
};
/* Module-level helper functions. */
static struct PyMethodDef _testbuffer_functions[] = {
    {"slice_indices", slice_indices, METH_VARARGS, NULL},
    {"get_pointer", get_pointer, METH_VARARGS, NULL},
    {"get_sizeof_void_p", get_sizeof_void_p, METH_NOARGS, NULL},
    {"get_contiguous", get_contiguous, METH_VARARGS, NULL},
    {"py_buffer_to_contiguous", py_buffer_to_contiguous, METH_VARARGS, NULL},
    {"is_contiguous", is_contiguous, METH_VARARGS, NULL},
    {"cmp_contig", cmp_contig, METH_VARARGS, NULL},
    {NULL, NULL}
};
/* Module definition; m_size of -1 means per-process state (the module
   keeps global C statics and cannot be re-initialized). */
static struct PyModuleDef _testbuffermodule = {
    PyModuleDef_HEAD_INIT,
    "_testbuffer",
    NULL,
    -1,
    _testbuffer_functions,
    NULL,
    NULL,
    NULL,
    NULL
};
/* Module exec: ready and register the types, import struct helpers,
   and expose the ND_* and PyBUF_* constants.  Returns 0/-1. */
static int
_testbuffer_exec(PyObject *mod)
{
    Py_SET_TYPE(&NDArray_Type, &PyType_Type);
    if (PyType_Ready(&NDArray_Type)) {
        return -1;
    }
    if (PyModule_AddType(mod, &NDArray_Type) < 0) {
        return -1;
    }
    Py_SET_TYPE(&StaticArray_Type, &PyType_Type);
    if (PyModule_AddType(mod, &StaticArray_Type) < 0) {
        return -1;
    }
    /* Cached references into the struct module, used by the pack/unpack
       helpers elsewhere in this file. */
    structmodule = PyImport_ImportModule("struct");
    if (structmodule == NULL) {
        return -1;
    }
    Struct = PyObject_GetAttrString(structmodule, "Struct");
    if (Struct == NULL) {
        return -1;
    }
    calcsize = PyObject_GetAttrString(structmodule, "calcsize");
    if (calcsize == NULL) {
        return -1;
    }
    simple_format = PyUnicode_FromString(simple_fmt);
    if (simple_format == NULL) {
        return -1;
    }
#define ADD_INT_MACRO(mod, macro)                                     \
    do {                                                              \
        if (PyModule_AddIntConstant(mod, #macro, macro) < 0) {        \
            return -1;                                                \
        }                                                             \
    } while (0)
    ADD_INT_MACRO(mod, ND_MAX_NDIM);
    ADD_INT_MACRO(mod, ND_VAREXPORT);
    ADD_INT_MACRO(mod, ND_WRITABLE);
    ADD_INT_MACRO(mod, ND_FORTRAN);
    ADD_INT_MACRO(mod, ND_SCALAR);
    ADD_INT_MACRO(mod, ND_PIL);
    ADD_INT_MACRO(mod, ND_GETBUF_FAIL);
    ADD_INT_MACRO(mod, ND_GETBUF_UNDEFINED);
    ADD_INT_MACRO(mod, ND_REDIRECT);
    ADD_INT_MACRO(mod, PyBUF_SIMPLE);
    ADD_INT_MACRO(mod, PyBUF_WRITABLE);
    ADD_INT_MACRO(mod, PyBUF_FORMAT);
    ADD_INT_MACRO(mod, PyBUF_ND);
    ADD_INT_MACRO(mod, PyBUF_STRIDES);
    ADD_INT_MACRO(mod, PyBUF_INDIRECT);
    ADD_INT_MACRO(mod, PyBUF_C_CONTIGUOUS);
    ADD_INT_MACRO(mod, PyBUF_F_CONTIGUOUS);
    ADD_INT_MACRO(mod, PyBUF_ANY_CONTIGUOUS);
    ADD_INT_MACRO(mod, PyBUF_FULL);
    ADD_INT_MACRO(mod, PyBUF_FULL_RO);
    ADD_INT_MACRO(mod, PyBUF_RECORDS);
    ADD_INT_MACRO(mod, PyBUF_RECORDS_RO);
    ADD_INT_MACRO(mod, PyBUF_STRIDED);
    ADD_INT_MACRO(mod, PyBUF_STRIDED_RO);
    ADD_INT_MACRO(mod, PyBUF_CONTIG);
    ADD_INT_MACRO(mod, PyBUF_CONTIG_RO);
    ADD_INT_MACRO(mod, PyBUF_READ);
    ADD_INT_MACRO(mod, PyBUF_WRITE);
#undef ADD_INT_MACRO
    return 0;
}
/* Module entry point (single-phase init). */
PyMODINIT_FUNC
PyInit__testbuffer(void)
{
    PyObject *mod = PyModule_Create(&_testbuffermodule);
    if (mod == NULL) {
        return NULL;
    }
#ifdef Py_GIL_DISABLED
    /* This module does not rely on the GIL for thread safety. */
    PyUnstable_Module_SetGIL(mod, Py_MOD_GIL_NOT_USED);
#endif
    if (_testbuffer_exec(mod) < 0) {
        Py_DECREF(mod);
        return NULL;
    }
    return mod;
}
'''
* Copyright (C) 2015 Tripwire, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
import xml.etree.ElementTree as ET
import argparse, sys, os, shutil, re
import TARDIS
if __name__=="__main__":
    """TARDIS threat parser entry point.

    Reads a Tripwire IP360 XML3 audit file (-f), creates a per-IP
    'Results' directory tree, maps internal vulnerability IDs to human
    readable names via idmap.config, and runs TARDIS.main() for every
    vulnerability on every audited host.
    """
    #os.chdir('C:\\TARDIS')
    os.chdir('/opt/tardis')
    #Get options from the command line...
    parser = argparse.ArgumentParser(description='TARDIS Threat Parser')
    parser.add_argument('-f', help='IP360 XML3 File', dest='file', required=True)
    args = parser.parse_args()
    #File must be in the Tripwire IP360 XML3 format
    file = args.file
    try:
        tree = ET.parse(file)
        root = tree.getroot()
    # Was a bare 'except:'; catch only parse/IO failures so unrelated
    # errors (e.g. KeyboardInterrupt) are not swallowed.
    except (ET.ParseError, EnvironmentError):
        sys.exit("Not a valid XML file, use IP360 XML3 audit output")
    #Clear results folder to have a fresh starting point...
    if os.path.exists('Results'):
        shutil.rmtree('Results')
    numHosts = 0
    for host in root.findall("./audit/hosts/host"):
        numHosts += 1
        directory = 'Results'
        #Create results directory to store the raw output
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Defaults guard against host records missing <ip>/<dnsName>;
        # previously an absent <dnsName> raised NameError below.
        sourceIP = ''
        sourceHost = ''
        #Get IP address to run threat search against
        for ip in host.findall("./ip"):
            sourceIP = ip.text
            #We like individual directories per IP
            if not os.path.exists(directory + '/' + sourceIP):
                os.makedirs(directory + '/' + sourceIP)
        for hostname in host.findall("./dnsName"):
            sourceHost = hostname.text
        for vulnerability in host.findall("./vulnerabilities/vulnerability"):
            internalVulnerabilityID = vulnerability.get('id')
            vulnName = internalVulnerabilityID
            #Convert internal vulnerability ID into a human readable name
            # (idmap.config lines look like "<number>:<name>")
            with open("idmap.config") as idmap:
                for line in idmap:
                    if internalVulnerabilityID in line:
                        vulnName = re.sub(r'\d+\:', '', line)
                        vulnName = re.sub(r'(\r\n|\r|\n)', '', vulnName)
                        internalVulnerabilityID = vulnName
            numResults = TARDIS.main(vulnName, sourceIP, sourceHost)
    if numHosts < 1:
        sys.exit("Not a valid XML file, use IP360 XML3 audit output")
package kotlinx.coroutines.reactor
import reactor.core.publisher.Flux
import reactor.core.publisher.Mono
/**
 * Blocks until [mono] completes and runs [checker] against the value it
 * produced.
 */
fun <T> checkMonoValue(
    mono: Mono<T>,
    checker: (T) -> Unit
) {
    checker(mono.block())
}
/**
 * Blocks on [mono], which is expected to terminate with an error, and runs
 * [checker] against the emitted [Throwable].
 *
 * The "Should have failed" error is raised only after the try/catch, so it
 * can no longer be captured by the function's own catch clause and silently
 * handed to [checker] (which is what the original code did when the mono
 * completed normally).
 */
fun checkErroneous(
    mono: Mono<*>,
    checker: (Throwable) -> Unit
) {
    val failure = try {
        mono.block()
        null
    } catch (e: Throwable) {
        e
    }
    checker(failure ?: error("Should have failed"))
}
/**
 * Expects [flux] to emit exactly one value and runs [checker] against it;
 * [kotlin.collections.single] fails if there are zero or multiple elements.
 */
fun <T> checkSingleValue(
    flux: Flux<T>,
    checker: (T) -> Unit
) {
    checker(flux.toIterable().single())
}
/**
 * Expects [flux] to terminate with an error and runs [checker] against the
 * captured [Throwable].
 *
 * materialize() converts every signal into a notification object, so
 * single() also fails if the flux emitted any value before erroring,
 * since that would produce more than one notification.
 */
fun checkErroneous(
flux: Flux<*>,
checker: (Throwable) -> Unit
) {
val singleNotification = flux.materialize().toIterable().single()
checker(singleNotification.throwable)
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | reactive/kotlinx-coroutines-reactor/test/Check.kt
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package stackruntime
import (
"context"
"path"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/hashicorp/hcl/v2"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/collections"
"github.com/hashicorp/terraform/internal/depsfile"
"github.com/hashicorp/terraform/internal/getproviders/providerreqs"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/providers"
"github.com/hashicorp/terraform/internal/stacks/stackaddrs"
"github.com/hashicorp/terraform/internal/stacks/stackplan"
"github.com/hashicorp/terraform/internal/stacks/stackruntime/hooks"
stacks_testing_provider "github.com/hashicorp/terraform/internal/stacks/stackruntime/testing"
"github.com/hashicorp/terraform/internal/stacks/stackstate"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/tfdiags"
"github.com/hashicorp/terraform/version"
)
func TestApplyDestroy(t *testing.T) {
fakePlanTimestamp, err := time.Parse(time.RFC3339, "2021-01-01T00:00:00Z")
if err != nil {
t.Fatal(err)
}
tcs := map[string]struct {
path string
description string
state *stackstate.State
store *stacks_testing_provider.ResourceStore
mutators []func(*stacks_testing_provider.ResourceStore, TestContext) TestContext
cycles []TestCycle
}{
"inputs-and-outputs": {
path: "component-input-output",
state: stackstate.NewStateBuilder().
AddInput("value", cty.StringVal("foo")).
AddOutput("value", cty.StringVal("foo")).
Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
planInputs: map[string]cty.Value{
"value": cty.StringVal("foo"),
},
wantPlannedChanges: []stackplan.PlannedChange{
&stackplan.PlannedChangeApplyable{
Applyable: true,
},
&stackplan.PlannedChangeHeader{
TerraformVersion: version.SemVer,
},
&stackplan.PlannedChangeOutputValue{
Addr: mustStackOutputValue("value"),
Action: plans.Delete,
Before: cty.StringVal("foo"),
After: cty.NullVal(cty.String),
},
&stackplan.PlannedChangePlannedTimestamp{
PlannedTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeRootInputValue{
Addr: mustStackInputVariable("value"),
Action: plans.NoOp,
Before: cty.StringVal("foo"),
After: cty.StringVal("foo"),
DeleteOnApply: true,
},
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeOutputValue{
Addr: mustStackOutputValue("value"),
Value: cty.NilVal, // destroyed
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("value"),
Value: cty.NilVal, // destroyed
},
},
},
},
},
"missing-resource": {
path: path.Join("with-single-input", "valid"),
description: "tests what happens when a resource is in state but not in the provider",
state: stackstate.NewStateBuilder().
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.self")).
AddInputVariable("id", cty.StringVal("e84b59f2")).
AddInputVariable("value", cty.StringVal("hello"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.self.testing_resource.data")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
SchemaVersion: 0,
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "e84b59f2",
"value": "hello",
}),
Status: states.ObjectReady,
})).
Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
planInputs: map[string]cty.Value{
"input": cty.StringVal("hello"),
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
},
// The resource that was in state but not in the data store should still
// be included to be destroyed.
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
NewStateSrc: nil, // We should be removing this from the state file.
Schema: providers.Schema{},
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("id"),
Value: cty.NilVal, // destroyed
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("input"),
Value: cty.NilVal, // destroyed
},
},
},
},
},
"datasource-in-state": {
path: "with-data-source",
description: "tests that we emit removal notices for data sources",
store: stacks_testing_provider.NewResourceStoreBuilder().
AddResource("foo", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("foo"),
"value": cty.StringVal("hello"),
})).Build(),
state: stackstate.NewStateBuilder().
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.self.data.testing_data_source.missing")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
SchemaVersion: 0,
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "e84b59f2",
"value": "hello",
}),
Status: states.ObjectReady,
})).
Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
planInputs: map[string]cty.Value{
"id": cty.StringVal("foo"),
"resource": cty.StringVal("bar"),
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
},
// This is a bit of a quirk of the system, this wasn't in the state
// file before so we don't need to emit this. But since Terraform
// pushes data sources into the refresh state, it's very difficult to
// tell the difference between this kind of change that doesn't need to
// be emitted, and the next change that does need to be emitted. It's
// better to emit both than to miss one, and emitting this doesn't
// actually harm anything.
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.data.testing_data_source.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: providers.Schema{},
NewStateSrc: nil, // deleted
},
// This was in the state file, so we're emitting the destroy notice.
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.data.testing_data_source.missing"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: providers.Schema{},
NewStateSrc: nil,
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("id"),
Value: cty.NilVal, // destroyed
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("resource"),
Value: cty.NilVal, // destroyed
},
},
},
},
},
"orphaned-data-sources-removed": {
path: "with-data-source",
description: "tests that we emit removal notices for data sources that are no longer in the configuration",
store: stacks_testing_provider.NewResourceStoreBuilder().
AddResource("foo", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("foo"),
"value": cty.StringVal("hello"),
})).Build(),
state: stackstate.NewStateBuilder().
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.self.data.testing_data_source.missing")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
SchemaVersion: 0,
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "e84b59f2",
"value": "hello",
}),
Status: states.ObjectReady,
})).
Build(),
cycles: []TestCycle{
{
planMode: plans.NormalMode,
planInputs: map[string]cty.Value{
"id": cty.StringVal("foo"),
"resource": cty.StringVal("bar"),
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("foo"),
mustInputVariable("resource"): cty.StringVal("bar"),
},
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.data.testing_data_source.data"),
NewStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "foo",
"value": "hello",
}),
AttrSensitivePaths: make([]cty.Path, 0),
Status: states.ObjectReady,
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.TestingDataSourceSchema,
},
// This data source should be removed from the state file as it is no
// longer in the configuration.
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.data.testing_data_source.missing"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: providers.Schema{},
NewStateSrc: nil, // deleted
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
NewStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "bar",
"value": "hello",
}),
Status: states.ObjectReady,
Dependencies: []addrs.ConfigResource{
mustAbsResourceInstance("data.testing_data_source.data").ConfigResource(),
},
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.TestingResourceSchema,
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("id"),
Value: cty.StringVal("foo"),
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("resource"),
Value: cty.StringVal("bar"),
},
},
},
{
planMode: plans.DestroyMode,
planInputs: map[string]cty.Value{
"id": cty.StringVal("foo"),
"resource": cty.StringVal("bar"),
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.data.testing_data_source.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: providers.Schema{},
NewStateSrc: nil, // deleted
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: providers.Schema{},
NewStateSrc: nil, // deleted
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("id"),
Value: cty.NilVal, // destroyed
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("resource"),
Value: cty.NilVal, // destroyed
},
},
},
},
},
"dependent-resources": {
path: "dependent-component",
description: "test the order of operations during create and destroy",
cycles: []TestCycle{
{
planMode: plans.NormalMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
Dependencies: collections.NewSet(mustAbsComponent("component.valid")),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("dependent"),
mustInputVariable("requirements"): cty.SetVal([]cty.Value{
cty.StringVal("valid"),
}),
},
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_blocked_resource.resource"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
NewStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "dependent",
"value": nil,
"required_resources": []interface{}{"valid"},
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
Schema: stacks_testing_provider.BlockedResourceSchema,
},
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.valid"),
ComponentInstanceAddr: mustAbsComponentInstance("component.valid"),
Dependents: collections.NewSet(mustAbsComponent("component.self")),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("valid"),
mustInputVariable("input"): cty.StringVal("resource"),
},
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.valid.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
NewStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "valid",
"value": "resource",
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
Schema: stacks_testing_provider.TestingResourceSchema,
},
},
},
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_blocked_resource.resource"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.valid"),
ComponentInstanceAddr: mustAbsComponentInstance("component.valid"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.valid.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"failed-destroy": {
path: "failed-component",
description: "tests what happens if a component fails to destroy",
state: stackstate.NewStateBuilder().
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.self"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.self.testing_failed_resource.data")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "failed",
"value": "resource",
"fail_plan": false,
"fail_apply": true,
}),
Status: states.ObjectReady,
}).
SetProviderAddr(mustDefaultRootProvider("testing"))).
Build(),
store: stacks_testing_provider.NewResourceStoreBuilder().
AddResource("failed", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("failed"),
"value": cty.StringVal("resource"),
"fail_plan": cty.False,
"fail_apply": cty.True,
})).
Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("failed"),
mustInputVariable("input"): cty.StringVal("resource"),
mustInputVariable("fail_plan"): cty.False,
mustInputVariable("fail_apply"): cty.False,
},
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_failed_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
NewStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "failed",
"value": "resource",
"fail_plan": false,
"fail_apply": true,
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
Schema: stacks_testing_provider.FailedResourceSchema,
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("fail_apply"),
Value: cty.NilVal, // destroyed
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("fail_plan"),
Value: cty.NilVal, // destroyed
},
},
wantAppliedDiags: initDiags(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics {
return diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "failedResource error",
Detail: "failed during apply",
})
}),
},
},
},
"destroy-after-failed-apply": {
path: path.Join("with-single-input", "failed-child"),
description: "tests destroying when state is only partially applied",
cycles: []TestCycle{
{
planMode: plans.NormalMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.child"),
ComponentInstanceAddr: mustAbsComponentInstance("component.child"),
Dependencies: collections.NewSet(mustAbsComponent("component.self")),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.NullVal(cty.String),
mustInputVariable("input"): cty.StringVal("child"),
mustInputVariable("fail_plan"): cty.NullVal(cty.Bool),
mustInputVariable("fail_apply"): cty.True,
},
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.child.testing_failed_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
Dependents: collections.NewSet(mustAbsComponent("component.child")),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("self"),
mustInputVariable("input"): cty.StringVal("value"),
},
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
NewStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "self",
"value": "value",
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
Schema: stacks_testing_provider.TestingResourceSchema,
},
},
wantAppliedDiags: initDiags(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics {
return diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "failedResource error",
Detail: "failed during apply",
})
}),
},
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.child"),
ComponentInstanceAddr: mustAbsComponentInstance("component.child"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"destroy-after-deferred-apply": {
path: "deferred-dependent",
description: "tests what happens when a destroy plan is applied after components have been deferred",
cycles: []TestCycle{
{
planMode: plans.NormalMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.deferred"),
ComponentInstanceAddr: mustAbsComponentInstance("component.deferred"),
Dependencies: collections.NewSet(mustAbsComponent("component.valid")),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("deferred"),
mustInputVariable("defer"): cty.True,
},
},
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.valid"),
ComponentInstanceAddr: mustAbsComponentInstance("component.valid"),
Dependents: collections.NewSet(mustAbsComponent("component.deferred")),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("valid"),
mustInputVariable("input"): cty.StringVal("valid"),
},
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.valid.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
NewStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "valid",
"value": "valid",
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
Schema: stacks_testing_provider.TestingResourceSchema,
},
},
},
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.deferred"),
ComponentInstanceAddr: mustAbsComponentInstance("component.deferred"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.valid"),
ComponentInstanceAddr: mustAbsComponentInstance("component.valid"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.valid.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"deferred-destroy": {
path: "deferred-dependent",
description: "tests what happens when a destroy operation is deferred",
state: stackstate.NewStateBuilder().
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.valid")).
AddDependent(mustAbsComponent("component.deferred"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.valid.testing_resource.data")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "valid",
"value": "valid",
}),
Status: states.ObjectReady,
})).
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.deferred")).
AddDependency(mustAbsComponent("component.valid"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.deferred.testing_deferred_resource.data")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "deferred",
"value": nil,
"deferred": true,
}),
Status: states.ObjectReady,
})).
Build(),
store: stacks_testing_provider.NewResourceStoreBuilder().
AddResource("valid", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("valid"),
"value": cty.StringVal("valid"),
})).
AddResource("deferred", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("deferred"),
"value": cty.NullVal(cty.String),
"deferred": cty.True,
})).
Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
wantPlannedChanges: []stackplan.PlannedChange{
&stackplan.PlannedChangeApplyable{
Applyable: true,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.deferred"),
Action: plans.Delete,
Mode: plans.DestroyMode,
RequiredComponents: collections.NewSet[stackaddrs.AbsComponent](mustAbsComponent("component.valid")),
PlannedInputValues: map[string]plans.DynamicValue{
"id": mustPlanDynamicValueDynamicType(cty.StringVal("deferred")),
"defer": mustPlanDynamicValueDynamicType(cty.True),
},
PlannedInputValueMarks: map[string][]cty.PathValueMarks{
"id": nil,
"defer": nil,
},
PlannedOutputValues: make(map[string]cty.Value),
PlannedCheckResults: &states.CheckResults{},
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeDeferredResourceInstancePlanned{
ResourceInstancePlanned: stackplan.PlannedChangeResourceInstancePlanned{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.deferred.testing_deferred_resource.data"),
ChangeSrc: &plans.ResourceInstanceChangeSrc{
Addr: mustAbsResourceInstance("testing_deferred_resource.data"),
PrevRunAddr: mustAbsResourceInstance("testing_deferred_resource.data"),
ProviderAddr: mustDefaultRootProvider("testing"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Delete,
Before: mustPlanDynamicValue(cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("deferred"),
"value": cty.NullVal(cty.String),
"deferred": cty.True,
})),
After: mustPlanDynamicValue(cty.NullVal(cty.String)),
},
},
PriorStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "deferred",
"value": nil,
"deferred": true,
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.DeferredResourceSchema,
},
DeferredReason: "resource_config_unknown",
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.valid"),
PlanApplyable: false,
Action: plans.Delete,
Mode: plans.DestroyMode,
PlannedInputValues: map[string]plans.DynamicValue{
"id": mustPlanDynamicValueDynamicType(cty.StringVal("valid")),
"input": mustPlanDynamicValueDynamicType(cty.StringVal("valid")),
},
PlannedInputValueMarks: map[string][]cty.PathValueMarks{
"id": nil,
"input": nil,
},
PlannedOutputValues: make(map[string]cty.Value),
PlannedCheckResults: &states.CheckResults{},
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeDeferredResourceInstancePlanned{
ResourceInstancePlanned: stackplan.PlannedChangeResourceInstancePlanned{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.valid.testing_resource.data"),
ChangeSrc: &plans.ResourceInstanceChangeSrc{
Addr: mustAbsResourceInstance("testing_resource.data"),
PrevRunAddr: mustAbsResourceInstance("testing_resource.data"),
ProviderAddr: mustDefaultRootProvider("testing"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Delete,
Before: mustPlanDynamicValue(cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("valid"),
"value": cty.StringVal("valid"),
})),
After: mustPlanDynamicValue(cty.NullVal(cty.String)),
},
},
PriorStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "valid",
"value": "valid",
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.TestingResourceSchema,
},
DeferredReason: "deferred_prereq",
},
&stackplan.PlannedChangeHeader{
TerraformVersion: version.SemVer,
},
&stackplan.PlannedChangePlannedTimestamp{
PlannedTimestamp: fakePlanTimestamp,
},
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.deferred"),
ComponentInstanceAddr: mustAbsComponentInstance("component.deferred"),
Dependencies: collections.NewSet(mustAbsComponent("component.valid")),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("deferred"),
mustInputVariable("defer"): cty.True,
},
},
&stackstate.AppliedChangeComponentInstance{
ComponentAddr: mustAbsComponent("component.valid"),
ComponentInstanceAddr: mustAbsComponentInstance("component.valid"),
Dependents: collections.NewSet(mustAbsComponent("component.deferred")),
OutputValues: make(map[addrs.OutputValue]cty.Value),
InputVariables: map[addrs.InputVariable]cty.Value{
mustInputVariable("id"): cty.StringVal("valid"),
mustInputVariable("input"): cty.StringVal("valid"),
},
},
},
},
},
},
"destroy-with-input-dependency": {
path: path.Join("with-single-input-and-output", "input-dependency"),
description: "tests destroy operations with input dependencies",
cycles: []TestCycle{
{
// Just create everything normally, and don't validate it.
planMode: plans.NormalMode,
},
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.child"),
ComponentInstanceAddr: mustAbsComponentInstance("component.child"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.child.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.parent"),
ComponentInstanceAddr: mustAbsComponentInstance("component.parent"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"destroy-with-provider-dependency": {
path: path.Join("with-single-input-and-output", "provider-dependency"),
description: "tests destroy operations with provider dependencies",
cycles: []TestCycle{
{
// Just create everything normally, and don't validate it.
planMode: plans.NormalMode,
},
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.child"),
ComponentInstanceAddr: mustAbsComponentInstance("component.child"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.child.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.parent"),
ComponentInstanceAddr: mustAbsComponentInstance("component.parent"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"destroy-with-for-each-dependency": {
path: path.Join("with-single-input-and-output", "for-each-dependency"),
description: "tests destroy operations with for-each dependencies",
cycles: []TestCycle{
{
// Just create everything normally, and don't validate it.
planMode: plans.NormalMode,
},
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.child"),
ComponentInstanceAddr: mustAbsComponentInstance("component.child[\"a\"]"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.child[\"a\"].testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.parent"),
ComponentInstanceAddr: mustAbsComponentInstance("component.parent[\"a\"]"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent[\"a\"].testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"destroy-with-provider-req": {
path: "auth-provider-w-data",
mutators: []func(store *stacks_testing_provider.ResourceStore, testContext TestContext) TestContext{
func(store *stacks_testing_provider.ResourceStore, testContext TestContext) TestContext {
store.Set("credentials", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("credentials"),
"value": cty.StringVal("zero"),
}))
testContext.providers[addrs.NewDefaultProvider("testing")] = func() (providers.Interface, error) {
provider := stacks_testing_provider.NewProviderWithData(t, store)
provider.Authentication = "zero"
return provider, nil
}
return testContext
},
func(store *stacks_testing_provider.ResourceStore, testContext TestContext) TestContext {
store.Set("credentials", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("credentials"),
"value": cty.StringVal("one"),
}))
testContext.providers[addrs.NewDefaultProvider("testing")] = func() (providers.Interface, error) {
provider := stacks_testing_provider.NewProviderWithData(t, store)
provider.Authentication = "one" // So we must reload the data source in order to authenticate.
return provider, nil
}
return testContext
},
},
cycles: []TestCycle{
{
planMode: plans.NormalMode,
},
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.create"),
ComponentInstanceAddr: mustAbsComponentInstance("component.create"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.create.testing_resource.resource"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.load"),
ComponentInstanceAddr: mustAbsComponentInstance("component.load"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.load.data.testing_data_source.credentials"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"destroy-with-provider-req-and-removed": {
path: path.Join("auth-provider-w-data", "removed"),
state: stackstate.NewStateBuilder().
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.load")).
AddDependent(mustAbsComponent("component.create")).
AddOutputValue("credentials", cty.StringVal("wrong"))). // must reload the credentials
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.create")).
AddDependency(mustAbsComponent("component.load"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.create.testing_resource.resource")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "resource",
"value": nil,
}),
Status: states.ObjectReady,
}).
SetProviderAddr(mustDefaultRootProvider("testing"))).
Build(),
store: stacks_testing_provider.NewResourceStoreBuilder().AddResource("credentials", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("credentials"),
// we have the wrong value in state, so this correct value must
// be loaded for this test to work.
"value": cty.StringVal("authn"),
})).Build(),
mutators: []func(store *stacks_testing_provider.ResourceStore, testContext TestContext) TestContext{
func(store *stacks_testing_provider.ResourceStore, testContext TestContext) TestContext {
testContext.providers[addrs.NewDefaultProvider("testing")] = func() (providers.Interface, error) {
provider := stacks_testing_provider.NewProviderWithData(t, store)
provider.Authentication = "authn" // So we must reload the data source in order to authenticate.
return provider, nil
}
return testContext
},
},
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.create"),
ComponentInstanceAddr: mustAbsComponentInstance("component.create"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.create.testing_resource.resource"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.load"),
ComponentInstanceAddr: mustAbsComponentInstance("component.load"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.load.data.testing_data_source.credentials"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"empty-destroy-with-data-source": {
path: path.Join("with-data-source", "dependent"),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
planInputs: map[string]cty.Value{
"id": cty.StringVal("foo"),
},
// deliberately empty, as we expect no changes from an
// empty state.
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.data"),
ComponentInstanceAddr: mustAbsComponentInstance("component.data"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("id"),
},
},
},
},
},
"destroy after manual removal": {
path: "removed-offline",
state: stackstate.NewStateBuilder().
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.parent")).
AddDependent(mustAbsComponent("component.child")).
AddOutputValue("value", cty.StringVal("hello"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.parent.testing_resource.resource")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "parent",
"value": "hello",
}),
Status: states.ObjectReady,
})).
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.child")).
AddDependency(mustAbsComponent("component.parent")).
AddInputVariable("value", cty.StringVal("hello"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.child.testing_resource.resource")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "child",
"value": "hello",
}),
Status: states.ObjectReady,
})).
Build(),
store: stacks_testing_provider.NewResourceStoreBuilder().
AddResource("child", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("child"),
"value": cty.StringVal("hello"),
})).Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
wantPlannedChanges: []stackplan.PlannedChange{
&stackplan.PlannedChangeApplyable{
Applyable: true,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.child"),
Action: plans.Delete,
Mode: plans.DestroyMode,
PlanComplete: true,
PlanApplyable: true,
RequiredComponents: collections.NewSet(mustAbsComponent("component.parent")),
PlannedInputValues: map[string]plans.DynamicValue{
"value": mustPlanDynamicValueDynamicType(cty.UnknownVal(cty.String)),
},
PlannedInputValueMarks: map[string][]cty.PathValueMarks{
"value": nil,
},
PlannedOutputValues: make(map[string]cty.Value),
PlannedCheckResults: &states.CheckResults{},
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeResourceInstancePlanned{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.child.testing_resource.resource"),
ChangeSrc: &plans.ResourceInstanceChangeSrc{
Addr: mustAbsResourceInstance("testing_resource.resource"),
PrevRunAddr: mustAbsResourceInstance("testing_resource.resource"),
ProviderAddr: mustDefaultRootProvider("testing"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Delete,
Before: mustPlanDynamicValue(cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("child"),
"value": cty.StringVal("hello"),
})),
After: mustPlanDynamicValue(cty.NullVal(cty.Object(map[string]cty.Type{
"id": cty.String,
"value": cty.String,
}))),
},
},
PriorStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "child",
"value": "hello",
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.TestingResourceSchema,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.parent"),
Action: plans.Delete,
Mode: plans.DestroyMode,
PlanComplete: true,
PlanApplyable: false,
PlannedInputValues: make(map[string]plans.DynamicValue),
PlannedOutputValues: map[string]cty.Value{
"value": cty.UnknownVal(cty.String),
},
PlannedCheckResults: &states.CheckResults{},
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeResourceInstancePlanned{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent.testing_resource.resource"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackplan.PlannedChangeHeader{
TerraformVersion: version.SemVer,
},
&stackplan.PlannedChangePlannedTimestamp{
PlannedTimestamp: fakePlanTimestamp,
},
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.child"),
ComponentInstanceAddr: mustAbsComponentInstance("component.child"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.child.testing_resource.resource"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.parent"),
ComponentInstanceAddr: mustAbsComponentInstance("component.parent"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent.testing_resource.resource"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"partial destroy recovery": {
path: "component-chain",
description: "this test simulates a partial destroy recovery",
state: stackstate.NewStateBuilder().
// we only have data for the first component, indicating that
// the second and third components were destroyed but not the
// first one for some reason
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.one")).
AddDependent(mustAbsComponent("component.two")).
AddInputVariable("id", cty.StringVal("one")).
AddInputVariable("value", cty.StringVal("foo")).
AddOutputValue("value", cty.StringVal("foo"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.one.testing_resource.data")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "one",
"value": "foo",
}),
Status: states.ObjectReady,
})).
AddInput("value", cty.StringVal("foo")).
AddOutput("value", cty.StringVal("foo")).
Build(),
store: stacks_testing_provider.NewResourceStoreBuilder().
AddResource("one", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("one"),
"value": cty.StringVal("foo"),
})).
Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
planInputs: map[string]cty.Value{
"value": cty.StringVal("foo"),
},
wantPlannedChanges: []stackplan.PlannedChange{
&stackplan.PlannedChangeApplyable{
Applyable: true,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.one"),
Action: plans.Delete,
Mode: plans.DestroyMode,
PlanComplete: true,
PlanApplyable: true,
PlannedInputValues: map[string]plans.DynamicValue{
"id": mustPlanDynamicValueDynamicType(cty.StringVal("one")),
"value": mustPlanDynamicValueDynamicType(cty.StringVal("foo")),
},
PlannedInputValueMarks: map[string][]cty.PathValueMarks{
"id": nil,
"value": nil,
},
PlannedOutputValues: map[string]cty.Value{
"value": cty.StringVal("foo"),
},
PlannedCheckResults: &states.CheckResults{},
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeResourceInstancePlanned{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.one.testing_resource.data"),
ChangeSrc: &plans.ResourceInstanceChangeSrc{
Addr: mustAbsResourceInstance("testing_resource.data"),
PrevRunAddr: mustAbsResourceInstance("testing_resource.data"),
ProviderAddr: mustDefaultRootProvider("testing"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Delete,
Before: mustPlanDynamicValue(cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("one"),
"value": cty.StringVal("foo"),
})),
After: mustPlanDynamicValue(cty.NullVal(cty.Object(map[string]cty.Type{
"id": cty.String,
"value": cty.String,
}))),
},
},
PriorStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "one",
"value": "foo",
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.TestingResourceSchema,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.three"),
Action: plans.Delete,
Mode: plans.DestroyMode,
PlanComplete: true,
PlanApplyable: true,
RequiredComponents: collections.NewSet(mustAbsComponent("component.two")),
PlannedOutputValues: map[string]cty.Value{
"value": cty.StringVal("foo"),
},
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.two"),
Action: plans.Delete,
Mode: plans.DestroyMode,
PlanComplete: true,
PlanApplyable: true,
RequiredComponents: collections.NewSet(mustAbsComponent("component.one")),
PlannedOutputValues: map[string]cty.Value{
"value": cty.StringVal("foo"),
},
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeHeader{
TerraformVersion: version.SemVer,
},
&stackplan.PlannedChangeOutputValue{
Addr: mustStackOutputValue("value"),
Action: plans.Delete,
Before: cty.StringVal("foo"),
After: cty.NullVal(cty.String),
},
&stackplan.PlannedChangePlannedTimestamp{
PlannedTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeRootInputValue{
Addr: mustStackInputVariable("value"),
Action: plans.NoOp,
Before: cty.StringVal("foo"),
After: cty.StringVal("foo"),
DeleteOnApply: true,
},
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.one"),
ComponentInstanceAddr: mustAbsComponentInstance("component.one"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.one.testing_resource.data"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.three"),
ComponentInstanceAddr: mustAbsComponentInstance("component.three"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.two"),
ComponentInstanceAddr: mustAbsComponentInstance("component.two"),
},
&stackstate.AppliedChangeOutputValue{
Addr: mustStackOutputValue("value"),
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("value"),
},
},
},
},
},
"destroy-partial-state-with-module": {
path: "with-module",
state: stackstate.NewStateBuilder().
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.self")).
AddInputVariable("id", cty.StringVal("self")).
AddInputVariable("input", cty.StringVal("self"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.self.testing_resource.outside")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "self",
"value": "self",
}),
Status: states.ObjectReady,
})).
Build(),
store: stacks_testing_provider.NewResourceStoreBuilder().
AddResource("self", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("self"),
"value": cty.StringVal("self"),
})).
Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
planInputs: map[string]cty.Value{
"id": cty.StringVal("self"),
"input": cty.StringVal("self"),
},
wantPlannedChanges: []stackplan.PlannedChange{
&stackplan.PlannedChangeApplyable{
Applyable: true,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.self"),
Action: plans.Delete,
Mode: plans.DestroyMode,
PlanApplyable: true,
PlanComplete: true,
PlannedInputValues: map[string]plans.DynamicValue{
"create": mustPlanDynamicValueDynamicType(cty.True),
"id": mustPlanDynamicValueDynamicType(cty.StringVal("self")),
"input": mustPlanDynamicValueDynamicType(cty.StringVal("self")),
},
PlannedInputValueMarks: map[string][]cty.PathValueMarks{
"create": nil,
"id": nil,
"input": nil,
},
PlannedOutputValues: make(map[string]cty.Value),
PlannedCheckResults: new(states.CheckResults),
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeResourceInstancePlanned{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_resource.outside"),
ChangeSrc: &plans.ResourceInstanceChangeSrc{
Addr: mustAbsResourceInstance("testing_resource.outside"),
PrevRunAddr: mustAbsResourceInstance("testing_resource.outside"),
ProviderAddr: mustDefaultRootProvider("testing"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Delete,
Before: mustPlanDynamicValue(cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("self"),
"value": cty.StringVal("self"),
})),
After: mustPlanDynamicValue(cty.NullVal(cty.Object(map[string]cty.Type{
"id": cty.String,
"value": cty.String,
}))),
},
},
PriorStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "self",
"value": "self",
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.TestingResourceSchema,
},
&stackplan.PlannedChangeHeader{
TerraformVersion: version.SemVer,
},
&stackplan.PlannedChangePlannedTimestamp{
PlannedTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeRootInputValue{
Addr: mustStackInputVariable("id"),
Action: plans.Create,
Before: cty.NullVal(cty.DynamicPseudoType),
After: cty.StringVal("self"),
DeleteOnApply: true,
},
&stackplan.PlannedChangeRootInputValue{
Addr: mustStackInputVariable("input"),
Action: plans.Create,
Before: cty.NullVal(cty.DynamicPseudoType),
After: cty.StringVal("self"),
DeleteOnApply: true,
},
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.self"),
ComponentInstanceAddr: mustAbsComponentInstance("component.self"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.self.testing_resource.outside"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("id"),
},
&stackstate.AppliedChangeInputVariable{
Addr: mustStackInputVariable("input"),
},
},
},
},
},
"destroy-partial-state": {
path: "destroy-partial-state",
state: stackstate.NewStateBuilder().
AddComponentInstance(stackstate.NewComponentInstanceBuilder(mustAbsComponentInstance("component.parent")).
AddDependent(mustAbsComponent("component.child"))).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.parent.testing_resource.primary")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "primary",
}),
Status: states.ObjectReady,
})).
AddResourceInstance(stackstate.NewResourceInstanceBuilder().
SetAddr(mustAbsResourceInstanceObject("component.parent.testing_resource.secondary")).
SetProviderAddr(mustDefaultRootProvider("testing")).
SetResourceInstanceObjectSrc(states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "secondary",
"value": "primary",
}),
Status: states.ObjectReady,
})).
Build(),
store: stacks_testing_provider.NewResourceStoreBuilder().
AddResource("primary", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("primary"),
"value": cty.NullVal(cty.String),
})).
AddResource("secondary", cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("secondary"),
"value": cty.StringVal("primary"),
})).
Build(),
cycles: []TestCycle{
{
planMode: plans.DestroyMode,
wantPlannedChanges: []stackplan.PlannedChange{
&stackplan.PlannedChangeApplyable{
Applyable: true,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.child"),
Action: plans.Delete,
Mode: plans.DestroyMode,
PlanApplyable: true,
PlanComplete: true,
RequiredComponents: collections.NewSet(mustAbsComponent("component.parent")),
PlannedOutputValues: make(map[string]cty.Value),
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeComponentInstance{
Addr: mustAbsComponentInstance("component.parent"),
Action: plans.Delete,
Mode: plans.DestroyMode,
PlanApplyable: true,
PlanComplete: true,
PlannedInputValues: make(map[string]plans.DynamicValue),
PlannedOutputValues: map[string]cty.Value{
"deleted_id": cty.UnknownVal(cty.String),
},
PlannedCheckResults: &states.CheckResults{},
PlanTimestamp: fakePlanTimestamp,
},
&stackplan.PlannedChangeResourceInstancePlanned{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent.testing_resource.primary"),
ChangeSrc: &plans.ResourceInstanceChangeSrc{
Addr: mustAbsResourceInstance("testing_resource.primary"),
PrevRunAddr: mustAbsResourceInstance("testing_resource.primary"),
ProviderAddr: mustDefaultRootProvider("testing"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Delete,
Before: mustPlanDynamicValue(cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("primary"),
"value": cty.NullVal(cty.String),
})),
After: mustPlanDynamicValue(cty.NullVal(cty.Object(map[string]cty.Type{
"id": cty.String,
"value": cty.String,
}))),
},
},
PriorStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "primary",
"value": nil,
}),
Status: states.ObjectReady,
Dependencies: make([]addrs.ConfigResource, 0),
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.TestingResourceSchema,
},
&stackplan.PlannedChangeResourceInstancePlanned{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent.testing_resource.secondary"),
ChangeSrc: &plans.ResourceInstanceChangeSrc{
Addr: mustAbsResourceInstance("testing_resource.secondary"),
PrevRunAddr: mustAbsResourceInstance("testing_resource.secondary"),
ProviderAddr: mustDefaultRootProvider("testing"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Delete,
Before: mustPlanDynamicValue(cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("secondary"),
"value": cty.StringVal("primary"),
})),
After: mustPlanDynamicValue(cty.NullVal(cty.Object(map[string]cty.Type{
"id": cty.String,
"value": cty.String,
}))),
},
},
PriorStateSrc: &states.ResourceInstanceObjectSrc{
AttrsJSON: mustMarshalJSONAttrs(map[string]interface{}{
"id": "secondary",
"value": "primary",
}),
Status: states.ObjectReady,
Dependencies: []addrs.ConfigResource{
mustAbsResourceInstance("testing_resource.primary").ConfigResource(),
},
},
ProviderConfigAddr: mustDefaultRootProvider("testing"),
Schema: stacks_testing_provider.TestingResourceSchema,
},
&stackplan.PlannedChangeHeader{
TerraformVersion: version.SemVer,
},
&stackplan.PlannedChangePlannedTimestamp{
PlannedTimestamp: fakePlanTimestamp,
},
},
wantAppliedChanges: []stackstate.AppliedChange{
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.child"),
ComponentInstanceAddr: mustAbsComponentInstance("component.child"),
},
&stackstate.AppliedChangeComponentInstanceRemoved{
ComponentAddr: mustAbsComponent("component.parent"),
ComponentInstanceAddr: mustAbsComponentInstance("component.parent"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent.testing_resource.primary"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
&stackstate.AppliedChangeResourceInstanceObject{
ResourceInstanceObjectAddr: mustAbsResourceInstanceObject("component.parent.testing_resource.secondary"),
ProviderConfigAddr: mustDefaultRootProvider("testing"),
},
},
},
},
},
"destroy-with-follow-up": {
path: filepath.Join("with-single-input", "valid"),
cycles: []TestCycle{
{
planMode: plans.NormalMode, // create
planInputs: map[string]cty.Value{
"id": cty.StringVal("self"),
"input": cty.StringVal("self"),
},
},
{
planMode: plans.DestroyMode, // destroy
planInputs: map[string]cty.Value{
"id": cty.StringVal("self"),
"input": cty.StringVal("self"),
},
wantPlannedHooks: &ExpectedHooks{
ComponentExpanded: []*hooks.ComponentInstances{
{
ComponentAddr: mustAbsComponent("component.self"),
InstanceAddrs: []stackaddrs.AbsComponentInstance{mustAbsComponentInstance("component.self")},
},
},
PendingComponentInstancePlan: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
BeginComponentInstancePlan: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
EndComponentInstancePlan: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
ReportResourceInstanceStatus: []*hooks.ResourceInstanceStatusHookData{
{
Addr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
ProviderAddr: mustDefaultRootProvider("testing").Provider,
Status: hooks.ResourceInstancePlanning,
},
{
Addr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
ProviderAddr: mustDefaultRootProvider("testing").Provider,
Status: hooks.ResourceInstancePlanned,
},
},
ReportResourceInstancePlanned: []*hooks.ResourceInstanceChange{
{
Addr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
Change: &plans.ResourceInstanceChangeSrc{
Addr: mustAbsResourceInstance("testing_resource.data"),
PrevRunAddr: mustAbsResourceInstance("testing_resource.data"),
ProviderAddr: mustDefaultRootProvider("testing"),
ChangeSrc: plans.ChangeSrc{
Action: plans.Delete,
Before: mustPlanDynamicValue(cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("self"),
"value": cty.StringVal("self"),
})),
After: mustPlanDynamicValue(cty.NullVal(cty.Object(map[string]cty.Type{
"id": cty.String,
"value": cty.String,
}))),
},
},
},
},
ReportComponentInstancePlanned: []*hooks.ComponentInstanceChange{
{
Addr: mustAbsComponentInstance("component.self"),
Remove: 1,
},
},
},
wantAppliedHooks: &ExpectedHooks{
ComponentExpanded: []*hooks.ComponentInstances{
{
ComponentAddr: mustAbsComponent("component.self"),
InstanceAddrs: []stackaddrs.AbsComponentInstance{mustAbsComponentInstance("component.self")},
},
},
PendingComponentInstanceApply: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
BeginComponentInstanceApply: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
EndComponentInstanceApply: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
ReportResourceInstanceStatus: []*hooks.ResourceInstanceStatusHookData{
{
Addr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
ProviderAddr: mustDefaultRootProvider("testing").Provider,
Status: hooks.ResourceInstanceApplying,
},
{
Addr: mustAbsResourceInstanceObject("component.self.testing_resource.data"),
ProviderAddr: mustDefaultRootProvider("testing").Provider,
Status: hooks.ResourceInstanceApplied,
},
},
ReportComponentInstanceApplied: []*hooks.ComponentInstanceChange{
{
Addr: mustAbsComponentInstance("component.self"),
Remove: 1,
},
},
},
},
{
planMode: plans.DestroyMode, // should be empty destroy
planInputs: map[string]cty.Value{
"id": cty.StringVal("self"),
"input": cty.StringVal("self"),
},
wantPlannedHooks: &ExpectedHooks{
ComponentExpanded: []*hooks.ComponentInstances{
{
ComponentAddr: mustAbsComponent("component.self"),
InstanceAddrs: []stackaddrs.AbsComponentInstance{mustAbsComponentInstance("component.self")},
},
},
PendingComponentInstancePlan: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
BeginComponentInstancePlan: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
EndComponentInstancePlan: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
ReportComponentInstancePlanned: []*hooks.ComponentInstanceChange{{
Addr: mustAbsComponentInstance("component.self"),
}},
},
wantAppliedHooks: &ExpectedHooks{
ComponentExpanded: []*hooks.ComponentInstances{
{
ComponentAddr: mustAbsComponent("component.self"),
InstanceAddrs: []stackaddrs.AbsComponentInstance{mustAbsComponentInstance("component.self")},
},
},
PendingComponentInstanceApply: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
BeginComponentInstanceApply: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
EndComponentInstanceApply: collections.NewSet[stackaddrs.AbsComponentInstance](
mustAbsComponentInstance("component.self"),
),
ReportComponentInstanceApplied: []*hooks.ComponentInstanceChange{{
Addr: mustAbsComponentInstance("component.self"),
}},
},
},
},
},
}
	// Run every test case. Each case executes its cycles in order, threading
	// the stack state produced by one cycle's apply into the next cycle's
	// plan, so later cycles see the effects of earlier ones.
	for name, tc := range tcs {
		t.Run(name, func(t *testing.T) {
			ctx := context.Background()
			// All cases use the built-in "testing" provider, pinned to a
			// fixed fake version in the dependency lock file.
			lock := depsfile.NewLocks()
			lock.SetProvider(
				addrs.NewDefaultProvider("testing"),
				providerreqs.MustParseVersion("0.0.0"),
				providerreqs.MustParseVersionConstraints("=0.0.0"),
				providerreqs.PreferredHashes([]providerreqs.Hash{}),
			)
			// A case may supply a pre-seeded resource store; otherwise start
			// with an empty one shared by all cycles of this case.
			store := tc.store
			if store == nil {
				store = stacks_testing_provider.NewResourceStore()
			}
			testContext := TestContext{
				timestamp: &fakePlanTimestamp,
				config:    loadMainBundleConfigForTest(t, tc.path),
				providers: map[addrs.Provider]providers.Factory{
					addrs.NewDefaultProvider("testing"): func() (providers.Interface, error) {
						return stacks_testing_provider.NewProviderWithData(t, store), nil
					},
				},
				dependencyLocks: *lock,
			}
			state := tc.state
			for ix, cycle := range tc.cycles {
				// Mutators (when present) adjust the store/context before
				// each cycle; there must be one mutator per cycle.
				if tc.mutators != nil {
					testContext = tc.mutators[ix](store, testContext)
				}
				t.Run(strconv.FormatInt(int64(ix), 10), func(t *testing.T) {
					var plan *stackplan.Plan
					t.Run("plan", func(t *testing.T) {
						plan = testContext.Plan(t, ctx, state, cycle)
					})
					// Apply consumes the plan from the sibling subtest and
					// yields the state used by the next cycle.
					t.Run("apply", func(t *testing.T) {
						state = testContext.Apply(t, ctx, plan, cycle)
					})
				})
			}
		})
	}
} | go | github | https://github.com/hashicorp/terraform | internal/stacks/stackruntime/apply_destroy_test.go |
# multicourse/multicourse_settings.py
#
# central module for providing fixed settings (course name, number, title)
# for multiple courses. Loads this information from django.conf.settings
#
# Allows backward compatibility with settings configurations without
# multiple courses specified.
#
# The central piece of configuration data is the dict COURSE_SETTINGS, with
# keys being the COURSE_NAME (spaces ok), and the value being a dict of
# parameter,value pairs. The required parameters are:
#
# - number : course number (used in the wiki pages)
# - title : humanized descriptive course title
#
# Optional parameters:
#
# - xmlpath : path (relative to data directory) for this course (defaults to "")
#
# If COURSE_SETTINGS does not exist, then fallback to 6.002_Spring_2012 default,
# for now.
from django.conf import settings
#-----------------------------------------------------------------------------
# load course settings
# Resolution order: an explicit COURSE_SETTINGS dict in Django settings wins;
# otherwise a legacy single-course configuration (COURSE_NAME/COURSE_NUMBER/
# COURSE_TITLE/COURSE_LOCATION) is wrapped into the same dict shape; failing
# both, a hard-coded 6.002x default is used.
if hasattr(settings, 'COURSE_SETTINGS'):    # in the future, this could be replaced by reading an XML file
    COURSE_SETTINGS = settings.COURSE_SETTINGS
elif hasattr(settings, 'COURSE_NAME'):	# backward compatibility
    COURSE_SETTINGS = {settings.COURSE_NAME: {'number': settings.COURSE_NUMBER,
                                              'title': settings.COURSE_TITLE,
                                              'location': settings.COURSE_LOCATION,
                                              },
                       }
else:	# default to 6.002_Spring_2012
    COURSE_SETTINGS = {'6.002_Spring_2012': {'number': '6.002x',
                                             'title': 'Circuits and Electronics',
                                             'location': 'i4x://edx/6002xs12/course/6.002 Spring 2012',
                                             },
                       }
#-----------------------------------------------------------------------------
# wrapper functions around course settings
def get_coursename_from_request(request):
    """Return the course name stored in the request's session, or None.

    Side effect: when a course name is present, ``settings.COURSE_TITLE``
    is overwritten with that course's title so legacy code that reads the
    setting directly sees the right value.
    """
    if 'coursename' not in request.session:
        return None
    coursename = request.session['coursename']
    # keep the global title in sync with the session's current course
    settings.COURSE_TITLE = get_course_title(coursename)
    return coursename
def get_course_settings(coursename):
    """Return the settings dict for *coursename*, or None when unknown.

    Falls back to ``settings.COURSE_DEFAULT`` (or the hard-coded
    6.002_Spring_2012 course) when no name is given.  A second lookup is
    attempted with spaces normalized to underscores.
    """
    if not coursename:
        coursename = getattr(settings, 'COURSE_DEFAULT', '6.002_Spring_2012')
    # try the name as given, then with spaces converted to underscores
    for candidate in (coursename, coursename.replace(' ', '_')):
        if candidate in COURSE_SETTINGS:
            return COURSE_SETTINGS[candidate]
    return None
def is_valid_course(coursename):
    """Return True when *coursename* resolves to a known course configuration."""
    found = get_course_settings(coursename)
    return found is not None
def get_course_property(coursename, property):
    """Look up *property* in the course's settings; '' when absent.

    Returns the empty string both for an unknown course and for a known
    course that lacks the property.
    """
    # NOTE(review): should an unknown course raise instead of returning ''?
    course = get_course_settings(coursename)
    if not course:
        return ''
    return course.get(property, '')
# Thin convenience wrappers: each returns one named property of the course
# settings, or '' when the course or the property is unknown.
def get_course_xmlpath(coursename):
    return get_course_property(coursename, 'xmlpath')
def get_course_title(coursename):
    return get_course_property(coursename, 'title')
def get_course_number(coursename):
    return get_course_property(coursename, 'number')
def get_course_github_url(coursename):
    return get_course_property(coursename, 'github_url')
def get_course_default_chapter(coursename):
    return get_course_property(coursename, 'default_chapter')
def get_course_default_section(coursename):
    return get_course_property(coursename, 'default_section')
def get_course_location(coursename):
    return get_course_property(coursename, 'location')
#!/usr/bin/env python
import webapp2
import tba_config
from controllers.backup_controller import TbaCSVBackupEventsEnqueue, TbaCSVBackupEventDo, TbaCSVRestoreEventsEnqueue, TbaCSVRestoreEventDo
from controllers.backup_controller import TbaCSVBackupTeamsEnqueue, TbaCSVBackupTeamsDo
from controllers.datafeed_controller import TbaVideosGet, TbaVideosEnqueue
from controllers.datafeed_controller import FmsEventListGet, FmsTeamListGet
from controllers.datafeed_controller import OffseasonMatchesGet
from controllers.datafeed_controller import TwitterFrcfmsMatchesGet
from controllers.datafeed_controller import FMSAPIAwardsEnqueue, FMSAPIEventAlliancesEnqueue, FMSAPIEventRankingsEnqueue, FMSAPIMatchesEnqueue
from controllers.datafeed_controller import FMSAPIAwardsGet, FMSAPIEventAlliancesGet, FMSAPIEventRankingsGet, FMSAPIMatchesGet
from controllers.datafeed_controller import UsfirstEventDetailsEnqueue, UsfirstEventDetailsGet, UsfirstEventListGet
from controllers.datafeed_controller import UsfirstAwardsEnqueue, UsfirstAwardsGet
from controllers.datafeed_controller import UsfirstEventAlliancesEnqueue, UsfirstEventAlliancesGet
from controllers.datafeed_controller import UsfirstMatchesEnqueue, UsfirstMatchesGet, UsfirstEventRankingsEnqueue, UsfirstEventRankingsGet
from controllers.datafeed_controller import UsfirstTeamDetailsEnqueue, UsfirstTeamDetailsRollingEnqueue, UsfirstTeamDetailsGet, UsfirstTeamsTpidsGet
from controllers.datafeed_controller import UsfirstPre2003TeamEventsEnqueue, UsfirstPre2003TeamEventsGet
from controllers.cron_controller import DistrictPointsCalcEnqueue, DistrictPointsCalcDo
from controllers.cron_controller import EventShortNameCalcEnqueue, EventShortNameCalcDo
from controllers.cron_controller import EventTeamRepairDo, EventTeamUpdate, EventTeamUpdateEnqueue
from controllers.cron_controller import EventMatchstatsDo, EventMatchstatsEnqueue
from controllers.cron_controller import FinalMatchesRepairDo
from controllers.cron_controller import YearInsightsEnqueue, YearInsightsDo, OverallInsightsEnqueue, OverallInsightsDo, TypeaheadCalcEnqueue, TypeaheadCalcDo
from controllers.cron_controller import UpcomingNotificationDo
from controllers.admin.admin_cron_controller import AdminMobileClearEnqueue, AdminMobileClear, AdminSubsClearEnqueue, AdminSubsClear, \
AdminWebhooksClearEnqueue, AdminWebhooksClear
# Task-queue WSGI application: maps every /tasks/* URL to its handler class.
# Regex capture groups in a pattern are passed as positional arguments to the
# matched handler.  Patterns are tried in order, so the un-suffixed variants
# must precede their parameterized siblings.
app = webapp2.WSGIApplication([('/tasks/enqueue/csv_backup_events', TbaCSVBackupEventsEnqueue),
                               ('/tasks/enqueue/csv_backup_events/([0-9]*)', TbaCSVBackupEventsEnqueue),
                               ('/tasks/do/csv_backup_event/(.*)', TbaCSVBackupEventDo),
                               ('/tasks/enqueue/csv_restore_events', TbaCSVRestoreEventsEnqueue),
                               ('/tasks/enqueue/csv_restore_events/([0-9]*)', TbaCSVRestoreEventsEnqueue),
                               ('/tasks/do/csv_restore_event/(.*)', TbaCSVRestoreEventDo),
                               ('/tasks/enqueue/csv_backup_teams', TbaCSVBackupTeamsEnqueue),
                               ('/tasks/do/csv_backup_teams', TbaCSVBackupTeamsDo),
                               # datafeed fetch jobs: enqueue fans work out, get does the fetch
                               ('/tasks/enqueue/tba_videos', TbaVideosEnqueue),
                               ('/tasks/enqueue/fmsapi_awards/(.*)', FMSAPIAwardsEnqueue),
                               ('/tasks/enqueue/fmsapi_event_alliances/(.*)', FMSAPIEventAlliancesEnqueue),
                               ('/tasks/enqueue/fmsapi_event_rankings/(.*)', FMSAPIEventRankingsEnqueue),
                               ('/tasks/enqueue/fmsapi_matches/(.*)', FMSAPIMatchesEnqueue),
                               ('/tasks/enqueue/usfirst_event_alliances/(.*)', UsfirstEventAlliancesEnqueue),
                               ('/tasks/enqueue/usfirst_event_details/([0-9]*)', UsfirstEventDetailsEnqueue),
                               ('/tasks/enqueue/usfirst_event_rankings/(.*)', UsfirstEventRankingsEnqueue),
                               ('/tasks/enqueue/usfirst_awards/(.*)', UsfirstAwardsEnqueue),
                               ('/tasks/enqueue/usfirst_matches/(.*)', UsfirstMatchesEnqueue),
                               ('/tasks/enqueue/usfirst_team_details', UsfirstTeamDetailsEnqueue),
                               ('/tasks/enqueue/usfirst_team_details_rolling', UsfirstTeamDetailsRollingEnqueue),
                               ('/tasks/enqueue/usfirst_pre2003_team_events', UsfirstPre2003TeamEventsEnqueue),
                               ('/tasks/get/fms_event_list', FmsEventListGet),
                               ('/tasks/get/fms_team_list', FmsTeamListGet),
                               ('/tasks/get/offseason_matches/(.*)', OffseasonMatchesGet),
                               ('/tasks/get/tba_videos/(.*)', TbaVideosGet),
                               ('/tasks/get/twitter_frcfms_matches', TwitterFrcfmsMatchesGet),
                               ('/tasks/get/fmsapi_awards/(.*)', FMSAPIAwardsGet),
                               ('/tasks/get/fmsapi_event_alliances/(.*)', FMSAPIEventAlliancesGet),
                               ('/tasks/get/fmsapi_event_rankings/(.*)', FMSAPIEventRankingsGet),
                               ('/tasks/get/fmsapi_matches/(.*)', FMSAPIMatchesGet),
                               ('/tasks/get/usfirst_event_alliances/(.*)', UsfirstEventAlliancesGet),
                               ('/tasks/get/usfirst_event_list/([0-9]*)', UsfirstEventListGet),
                               ('/tasks/get/usfirst_event_details/([0-9]*)/([0-9]*)', UsfirstEventDetailsGet),
                               ('/tasks/get/usfirst_event_rankings/(.*)', UsfirstEventRankingsGet),
                               ('/tasks/get/usfirst_awards/(.*)', UsfirstAwardsGet),
                               ('/tasks/get/usfirst_matches/(.*)', UsfirstMatchesGet),
                               ('/tasks/get/usfirst_team_details/(.*)', UsfirstTeamDetailsGet),
                               ('/tasks/get/usfirst_teams_tpids/([0-9]*)', UsfirstTeamsTpidsGet),
                               ('/tasks/get/usfirst_pre2003_team_events/(.*)', UsfirstPre2003TeamEventsGet),
                               # derived-statistics jobs (district points, insights, typeahead, ...)
                               ('/tasks/math/enqueue/district_points_calc/([0-9]*)', DistrictPointsCalcEnqueue),
                               ('/tasks/math/do/district_points_calc/(.*)', DistrictPointsCalcDo),
                               ('/tasks/math/enqueue/event_short_name_calc_enqueue/([0-9]*)', EventShortNameCalcEnqueue),
                               ('/tasks/math/do/event_short_name_calc_do/(.*)', EventShortNameCalcDo),
                               ('/tasks/math/enqueue/event_matchstats/(.*)', EventMatchstatsEnqueue),
                               ('/tasks/math/enqueue/eventteam_update/(.*)', EventTeamUpdateEnqueue),
                               ('/tasks/math/do/event_matchstats/(.*)', EventMatchstatsDo),
                               ('/tasks/math/do/eventteam_repair', EventTeamRepairDo),
                               ('/tasks/math/do/eventteam_update/(.*)', EventTeamUpdate),
                               ('/tasks/math/do/final_matches_repair/([0-9]*)', FinalMatchesRepairDo),
                               ('/tasks/math/enqueue/overallinsights/(.*)', OverallInsightsEnqueue),
                               ('/tasks/math/do/overallinsights/(.*)', OverallInsightsDo),
                               ('/tasks/math/enqueue/insights/(.*)/([0-9]*)', YearInsightsEnqueue),
                               ('/tasks/math/do/insights/(.*)/([0-9]*)', YearInsightsDo),
                               ('/tasks/math/enqueue/typeaheadcalc', TypeaheadCalcEnqueue),
                               ('/tasks/math/do/typeaheadcalc', TypeaheadCalcDo),
                               ('/tasks/notifications/upcoming_match', UpcomingNotificationDo),
                               # admin maintenance jobs
                               ('/tasks/admin/enqueue/clear_mobile_duplicates', AdminMobileClearEnqueue),
                               ('/tasks/admin/clear_mobile_duplicates', AdminMobileClear),
                               ('/tasks/admin/enqueue/clear_old_subs', AdminSubsClearEnqueue),
                               ('/tasks/admin/clear_old_subs', AdminSubsClear),
                               ('/tasks/admin/enqueue/clear_old_webhooks', AdminWebhooksClearEnqueue),
                               ('/tasks/admin/clear_old_webhooks', AdminWebhooksClear),
                               ],
                              debug=tba_config.DEBUG)
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class LocalNews8IE(InfoExtractor):
    """Extractor for localnews8.com video pages.

    The site embeds its videos through Kaltura, so this extractor only
    scrapes the Kaltura partner id and entry id out of the page and then
    delegates the actual extraction via a ``url_transparent`` result.
    """
    _VALID_URL = r'https?://(?:www\.)?localnews8\.com/(?:[^/]+/)*(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.localnews8.com/news/rexburg-business-turns-carbon-fiber-scraps-into-wedding-rings/35183304',
        'md5': 'be4d48aea61aa2bde7be2ee47691ad20',
        'info_dict': {
            'id': '35183304',
            'display_id': 'rexburg-business-turns-carbon-fiber-scraps-into-wedding-rings',
            'ext': 'mp4',
            'title': 'Rexburg business turns carbon fiber scraps into wedding ring',
            'description': 'The process was first invented by Lamborghini and less than a dozen companies around the world use it.',
            'duration': 153,
            'timestamp': 1441844822,
            'upload_date': '20150910',
            'uploader_id': 'api',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        # Both Kaltura ids are embedded in inline JavaScript on the page.
        partner_id = self._search_regex(
            r'partnerId\s*[:=]\s*(["\'])(?P<id>\d+)\1',
            webpage, 'partner id', group='id')
        kaltura_id = self._search_regex(
            r'videoIdString\s*[:=]\s*(["\'])kaltura:(?P<id>[0-9a-z_]+)\1',
            webpage, 'video id', group='id')  # fixed typo: was 'videl id'

        # Hand off to the Kaltura extractor, keeping our ids for the result.
        return {
            '_type': 'url_transparent',
            'url': 'kaltura:%s:%s' % (partner_id, kaltura_id),
            'ie_key': 'Kaltura',
            'id': video_id,
            'display_id': display_id,
        }
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
//go:build windows
// +build windows
package clistate
import (
"math"
"syscall"
"unsafe"
)
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procCreateEventW = modkernel32.NewProc("CreateEventW")
)
const (
// dwFlags defined for LockFileEx
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
_LOCKFILE_FAIL_IMMEDIATELY = 1
_LOCKFILE_EXCLUSIVE_LOCK = 2
)
// lock takes an exclusive, non-blocking advisory lock on the open state
// file via LockFileEx.  If another process already holds the lock, the
// call fails immediately instead of waiting.
func (s *LocalState) lock() error {
	// even though we're failing immediately, an overlapped event structure is
	// required
	ol, err := newOverlapped()
	if err != nil {
		return err
	}
	defer syscall.CloseHandle(ol.HEvent)
	// Lock the maximum possible byte range so the entire file is covered.
	return lockFileEx(
		syscall.Handle(s.stateFileOut.Fd()),
		_LOCKFILE_EXCLUSIVE_LOCK|_LOCKFILE_FAIL_IMMEDIATELY,
		0,              // reserved
		0,              // bytes low
		math.MaxUint32, // bytes high
		ol,
	)
}
// unlock is a no-op on Windows: a LockFileEx lock is released
// automatically when the locked file handle is closed, which happens in
// the platform-independent Unlock code path.
func (s *LocalState) unlock() error {
	// the file is closed in Unlock
	return nil
}
// lockFileEx invokes the Win32 LockFileEx API on handle h.  A zero return
// from the syscall indicates failure, in which case the Win32 error (or
// EINVAL when none was reported) is returned.
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) error {
	ret, _, callErr := syscall.Syscall6(
		procLockFileEx.Addr(),
		6,
		uintptr(h),
		uintptr(flags),
		uintptr(reserved),
		uintptr(locklow),
		uintptr(lockhigh),
		uintptr(unsafe.Pointer(ol)),
	)
	if ret != 0 {
		// non-zero means the lock was acquired
		return nil
	}
	if callErr != 0 {
		return error(callErr)
	}
	// failure without a reported error code
	return syscall.EINVAL
}
// newOverlapped builds an OVERLAPPED structure carrying a fresh
// manual-reset event handle, which LockFileEx requires even for the
// synchronous, fail-immediately style of call used here.
func newOverlapped() (*syscall.Overlapped, error) {
	hEvent, err := createEvent(nil, true, false, nil)
	if err != nil {
		return nil, err
	}
	ov := &syscall.Overlapped{HEvent: hEvent}
	return ov, nil
}
func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) {
var _p0 uint32
if manualReset {
_p0 = 1
}
var _p1 uint32
if initialState {
_p1 = 1
}
r0, _, e1 := syscall.Syscall6(
procCreateEventW.Addr(),
4,
uintptr(unsafe.Pointer(sa)),
uintptr(_p0),
uintptr(_p1),
uintptr(unsafe.Pointer(name)),
0,
0,
)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
} | go | github | https://github.com/hashicorp/terraform | internal/command/clistate/local_state_lock_windows.go |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TFRT_GRAPH_EXECUTOR_EXECUTABLE_CONTEXT_H_
#define TENSORFLOW_CORE_TFRT_GRAPH_EXECUTOR_EXECUTABLE_CONTEXT_H_
#include <memory>
#include <utility>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tfrt/bef/bef_buffer.h" // from @tf_runtime
#include "tfrt/bef_executor/bef_file.h" // from @tf_runtime
#include "tfrt/host_context/resource_context.h" // from @tf_runtime
#include "tfrt/support/ref_count.h" // from @tf_runtime
namespace tensorflow {
namespace tfrt_stub {
// Stores executable-related data.
// Stores executable-related data.
//
// Exactly one of the two execution paths is populated, depending on which
// constructor was used: the MLRT bytecode path or the TFRT/BEF path.
struct ExecutableContext {
  // Constructs a context for the MLRT interpreter path, taking ownership of
  // the bytecode buffer and the executable loaded from it.
  ExecutableContext(mlrt::bc::Buffer bytecode_buffer,
                    std::unique_ptr<mlrt::LoadedExecutable> bytecode_executable)
      : bytecode_buffer(std::move(bytecode_buffer)),
        bytecode_executable(std::move(bytecode_executable)) {}
  // Constructs a context for the TFRT/BEF executor path.
  ExecutableContext(tfrt::BefBuffer bef,
                    tfrt::RCReference<tfrt::BEFFile> bef_file)
      : bef(std::move(bef)), bef_file(std::move(bef_file)) {}
  // True when this context was built for the MLRT path.
  bool IsForMlrt() const { return bytecode_executable != nullptr; }
  // Only one set of values will be filled.
  // For the MLRT path.
  mlrt::bc::Buffer bytecode_buffer;
  std::unique_ptr<mlrt::LoadedExecutable> bytecode_executable;
  // For the TFRT path.
  tfrt::BefBuffer bef;
  tfrt::RCReference<tfrt::BEFFile> bef_file;
  // There are some resources that need re-creating when the executable is
  // re-created, so a resource context is stored along with the executable.
  // This resource context is meant to be passed to the op kernels for their
  // references. See the comment above `GraphExecutor::resource_context_`
  // about the todo to merge that resource context with this one.
  tfrt::ResourceContext resource_context;
};
} // namespace tfrt_stub
} // namespace tensorflow
#endif // TENSORFLOW_CORE_TFRT_GRAPH_EXECUTOR_EXECUTABLE_CONTEXT_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/tfrt/graph_executor/executable_context.h |
/* Styles for a dropdown menu component: a trigger button ([ngMenuTrigger])
   that opens a grouped popup menu ([ngMenu]) of items ([ngMenuItem]).
   Material Symbols are used for the icons. */
@import url('https://fonts.googleapis.com/icon?family=Material+Symbols+Outlined');
/* Host centers the trigger and applies the app font. */
:host {
  display: flex;
  justify-content: center;
  font-family: var(--inter-font);
}
/* Trigger button: pink-tinted pill that opens the menu. */
[ngMenuTrigger] {
  display: flex;
  cursor: pointer;
  align-items: center;
  padding: 0.6rem 2rem;
  border-radius: 0.5rem;
  border: 1px solid transparent;
  background-color: color-mix(in srgb, var(--vivid-pink) 5%, transparent);
  color: color-mix(in srgb, var(--vivid-pink) 70%, var(--primary-contrast));
}
[ngMenuTrigger] .icon {
  font-size: 1.5rem;
  opacity: 0.875;
}
/* Popup container: vertical stack of menu groups. */
[ngMenu] {
  gap: 3px;
  width: 15rem;
  display: flex;
  flex-direction: column;
}
[ngMenu] .group {
  padding: 0.25rem;
  border-radius: 0.25rem;
  background-color: var(--page-background);
  box-shadow: 0 1px 2px 1px color-mix(in srgb, var(--primary-contrast) 25%, transparent);
}
/* Round only the outer corners of the first and last groups. */
[ngMenu] .group:first-of-type {
  border-top-left-radius: 1rem;
  border-top-right-radius: 1rem;
}
[ngMenu] .group:last-of-type {
  border-bottom-left-radius: 1rem;
  border-bottom-right-radius: 1rem;
}
/* Visibility is toggled through the data-visible attribute. */
[ngMenu][data-visible='false'] {
  display: none;
}
[ngMenuItem] {
  outline: none;
  display: flex;
  cursor: pointer;
  align-items: center;
  gap: 0.5rem;
  padding: 0.75rem;
  font-size: 0.875rem;
  border-radius: 0.75rem;
}
/* Hover/open states for the trigger, active state for items. */
[ngMenuTrigger]:hover,
[ngMenuTrigger][aria-expanded='true'] {
  background: color-mix(in srgb, var(--vivid-pink) 10%, transparent);
}
[ngMenuItem][data-active='true'] {
  color: color-mix(in srgb, var(--vivid-pink) 70%, var(--primary-contrast));
  background: color-mix(in srgb, var(--vivid-pink) 5%, transparent);
}
/* Keyboard focus ring. */
[ngMenuItem]:focus,
[ngMenuTrigger]:focus {
  outline: 2px solid var(--vivid-pink);
}
[ngMenuItem] .icon {
  opacity: 0.875;
  font-size: 1.25rem;
}
[ngMenuItem] .label {
  flex: 1;
  opacity: 0.875;
  font-size: 0.875rem;
}
/* Dim the submenu arrow while its submenu is closed. */
[ngMenuItem]:not([aria-expanded='true']) .arrow {
  opacity: 0.5;
}
[ngMenu] .separator {
  border-top: 1px solid var(--gray-500);
  margin: 0.25rem 0;
  opacity: 0.25;
}
/* Disabled items are announced via aria-disabled and visually muted. */
[ngMenuItem][aria-disabled='true'] {
  opacity: 0.5;
  cursor: default;
}
from __future__ import division, absolute_import, print_function
import numpy.distutils.fcompiler
from numpy.testing import TestCase, run_module_suite, assert_
# (compiler version banner, expected parsed version) pairs used to verify
# that FCompiler.version_match extracts the right version number from the
# text printed by the Intel Fortran compilers.
intel_32bit_version_strings = [
    ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"
     "running on Intel(R) 32, Version 11.1", '11.1'),
]
intel_64bit_version_strings = [
    ("Intel(R) Fortran IA-64 Compiler Professional for applications"
     "running on IA-64, Version 11.0", '11.0'),
    ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"
     "running on Intel(R) 64, Version 11.1", '11.1')
]
class TestIntelFCompilerVersions(TestCase):
    """Version-banner parsing checks for the 32-bit Intel Fortran compiler."""
    def test_32bit_version(self):
        compiler = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
        for banner, expected in intel_32bit_version_strings:
            assert_(compiler.version_match(banner) == expected)
class TestIntelEM64TFCompilerVersions(TestCase):
    """Version-banner parsing checks for the 64-bit Intel Fortran compiler."""
    def test_64bit_version(self):
        compiler = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
        for banner, expected in intel_64bit_version_strings:
            assert_(compiler.version_match(banner) == expected)
# Allow running this test module directly via numpy's nose-style runner.
if __name__ == '__main__':
    run_module_suite()
###
#
# Copyright Alan Kennedy.
#
# You may contact the copyright holder at this uri:
#
# http://www.xhaus.com/contact/modjy
#
# The licence under which this code is released is the Apache License v2.0.
#
# The terms and conditions of this license are listed in a file contained
# in the distribution that also contained this file, under the name
# LICENSE.txt.
#
# You may also read a copy of the license at the following web address.
#
# http://modjy.xhaus.com/LICENSE.txt
#
###
import types
from modjy_exceptions import *
class write_object:
    """Legacy WSGI write() callable backed by a java.io.OutputStream.

    Instances are handed to WSGI applications as the write() callable
    returned from start_response().  Python 2 / Jython code.
    """
    def __init__(self, ostream):
        self.ostream = ostream
        # number of successful writes; lets callers detect whether any
        # response output has already been sent
        self.num_writes = 0
    def __call__(self, *args, **keywords):
        # Enforce the WSGI contract: exactly one (byte) string argument.
        if len(args) != 1 or not isinstance(args[0], types.StringTypes):
            raise NonStringOutput("Invocation of write callable requires exactly one string argument")
        try:
            self.ostream.write(args[0]) # Jython implicitly converts the (binary) string to a byte array
            # WSGI requires that all output be flushed before returning to the application
            # According to the java docs: " The flush method of OutputStream does nothing."
            # Still, leave it in place for now: it's in the right place should this
            # code ever be ported to another platform.
            self.ostream.flush()
            self.num_writes += 1
        except Exception, x:
            # Wrap any underlying Java I/O failure in a modjy exception
            raise ModjyIOException(x)
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import iteritems, string_types
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.template import Templar
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['RoleDefinition']
class RoleDefinition(Base, Become, Conditional, Taggable):
    """Parses a single role entry from a play.

    A role entry may be a bare string (the role name or path) or a dict.
    This class resolves the role's name and on-disk path and splits
    free-form role params away from known playbook attributes.
    """

    _role = FieldAttribute(isa='string')

    def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):
        super(RoleDefinition, self).__init__()
        self._play = play
        self._variable_manager = variable_manager
        self._loader = loader

        self._role_path = None
        self._role_basedir = role_basedir
        self._role_params = dict()

    # def __repr__(self):
    #     return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')

    @staticmethod
    def load(data, variable_manager=None, loader=None):
        raise AnsibleError("not implemented")

    def preprocess_data(self, ds):
        """Normalize the raw role datastructure before attribute loading."""
        # role names that are simply numbers can be parsed by PyYAML
        # as integers even when quoted, so turn it into a string type
        if isinstance(ds, int):
            ds = "%s" % ds

        # This used to be a bare `assert`, which is silently stripped when
        # Python runs with -O; raise explicitly so bad data is always caught.
        if not isinstance(ds, (dict, string_types, AnsibleBaseYAMLObject)):
            raise AnsibleError("role definitions must be parsed from a dict or a string, got: %s" % type(ds))

        if isinstance(ds, dict):
            ds = super(RoleDefinition, self).preprocess_data(ds)

        # save the original ds for use later
        self._ds = ds

        # we create a new data structure here, using the same
        # object used internally by the YAML parsing code so we
        # can preserve file:line:column information if it exists
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # first we pull the role name out of the data structure,
        # and then use that to determine the role path (which may
        # result in a new role name, if it was a file path)
        role_name = self._load_role_name(ds)
        (role_name, role_path) = self._load_role_path(role_name)

        # next, we split the role params out from the valid role
        # attributes and update the new datastructure with that
        # result and the role name
        if isinstance(ds, dict):
            (new_role_def, role_params) = self._split_role_params(ds)
            new_ds.update(new_role_def)
            self._role_params = role_params

        # set the role name in the new ds
        new_ds['role'] = role_name

        # we store the role path internally
        self._role_path = role_path

        # and return the cleaned-up data structure
        return new_ds

    def _load_role_name(self, ds):
        '''
        Returns the role name (either the role: or name: field) from
        the role definition, or (when the role definition is a simple
        string), just that string
        '''

        if isinstance(ds, string_types):
            return ds

        role_name = ds.get('role', ds.get('name'))
        if not role_name or not isinstance(role_name, string_types):
            raise AnsibleError('role definitions must contain a role name', obj=ds)

        # if we have the required datastructures, and if the role_name
        # contains a variable, try and template it now
        if self._variable_manager:
            all_vars = self._variable_manager.get_vars(play=self._play)
            templar = Templar(loader=self._loader, variables=all_vars)
            if templar._contains_vars(role_name):
                role_name = templar.template(role_name)

        return role_name

    def _load_role_path(self, role_name):
        '''
        the 'role', as specified in the ds (or as a bare string), can either
        be a simple name or a full path. If it is a full path, we use the
        basename as the role name, otherwise we take the name as-given and
        append it to the default role path
        '''

        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
        ]

        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            role_search_paths.extend(C.DEFAULT_ROLES_PATH)

        # next, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)

        # finally as a last resort we look in the current basedir as set
        # in the loader (which should be the playbook dir itself) but without
        # the roles/ dir appended
        role_search_paths.append(self._loader.get_basedir())

        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
            all_vars = self._variable_manager.get_vars(play=self._play)
        else:
            all_vars = dict()

        templar = Templar(loader=self._loader, variables=all_vars)
        role_name = templar.template(role_name)

        # now iterate through the possible paths and return the first one we find
        for path in role_search_paths:
            path = templar.template(path)
            role_path = unfrackpath(os.path.join(path, role_name))
            if self._loader.path_exists(role_path):
                return (role_name, role_path)

        # if not found elsewhere try to extract path from name
        role_path = unfrackpath(role_name)
        if self._loader.path_exists(role_path):
            role_name = os.path.basename(role_name)
            return (role_name, role_path)

        raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)

    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and store
        them in a dictionary of params for parsing later
        '''
        role_def = dict()
        role_params = dict()
        base_attribute_names = frozenset(self._valid_attrs.keys())
        for (key, value) in iteritems(ds):
            # use the list of FieldAttribute values to determine what is and is not
            # an extra parameter for this role (or sub-class of this role)
            # FIXME: hard-coded list of exception key names here corresponds to the
            #        connection fields in the Base class. There may need to be some
            #        other mechanism where we exclude certain kinds of field attributes,
            #        or make this list more automatic in some way so we don't have to
            #        remember to update it manually.
            if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
                if key in ('connection', 'port', 'remote_user'):
                    display.deprecated("Using '%s' as a role param has been deprecated. " % key +
                                       "In the future, these values should be entered in the `vars:` " +
                                       "section for roles, but for now we'll store it as both a param and an attribute.", version="2.7")
                    role_def[key] = value
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value

        return (role_def, role_params)

    def get_role_params(self):
        """Return a copy of the free-form params split off this role."""
        return self._role_params.copy()

    def get_role_path(self):
        """Return the on-disk path resolved for this role (or None)."""
        return self._role_path
# #
# Copyright 2015-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for easyconfig/types.py
@author: Kenneth Hoste (Ghent University)
"""
from test.framework.utilities import EnhancedTestCase
from unittest import TestLoader, main
from easybuild.tools.build_log import EasyBuildError
from easybuild.framework.easyconfig.types import as_hashable, check_element_types, check_key_types, check_known_keys
from easybuild.framework.easyconfig.types import check_required_keys, check_type_of_param_value, convert_value_type
from easybuild.framework.easyconfig.types import DEPENDENCIES, DEPENDENCY_DICT, NAME_VERSION_DICT
from easybuild.framework.easyconfig.types import is_value_of_type, to_name_version_dict, to_dependencies, to_dependency
class TypeCheckingTest(EnhancedTestCase):
"""Tests for value type checking of easyconfig parameters."""
    def test_check_type_of_param_value(self):
        """Test check_type_of_param_value function.

        check_type_of_param_value returns a (type_ok, converted_value)
        tuple; (False, None) signals a type mismatch.
        """
        # check selected values that should be strings
        for key in ['name', 'version']:
            self.assertEqual(check_type_of_param_value(key, 'foo'), (True, 'foo'))
            for not_a_string in [100, 1.5, ('bar',), ['baz'], None]:
                self.assertEqual(check_type_of_param_value(key, not_a_string), (False, None))
            # value doesn't matter, only type does
            self.assertEqual(check_type_of_param_value(key, ''), (True, ''))
        # parameters with no type specification always pass the check
        key = 'nosucheasyconfigparametereverhopefully'
        for val in ['foo', 100, 1.5, ('bar',), ['baz'], '', None]:
            self.assertEqual(check_type_of_param_value(key, val), (True, val))
        # check use of auto_convert
        self.assertEqual(check_type_of_param_value('version', 1.5), (False, None))
        self.assertEqual(check_type_of_param_value('version', 1.5, auto_convert=True), (True, '1.5'))
        # check type checking of toolchain (non-trivial type: dict with only name/version keys & string values)
        toolchain = {'name': 'goolf', 'version': '1.4.10'}
        self.assertEqual(check_type_of_param_value('toolchain', toolchain), (True, toolchain))
        # missing 'version' key
        self.assertEqual(check_type_of_param_value('toolchain', {'name': 'intel'}), (False, None))
        # non-string value for 'version'
        toolchain = {'name': 'goolf', 'version': 100}
        self.assertEqual(check_type_of_param_value('toolchain', toolchain), (False, None))
        # check auto-converting of toolchain value
        # string, list and dict forms must all normalize to the same dict
        toolchain = {'name': 'intel', 'version': '2015a'}
        for tcspec in ["intel, 2015a", ['intel', '2015a'], toolchain]:
            self.assertEqual(check_type_of_param_value('toolchain', tcspec, auto_convert=True), (True, toolchain))
def test_convert_value_type(self):
"""Test convert_value_type function."""
# to string
self.assertEqual(convert_value_type(100, basestring), '100')
self.assertEqual(convert_value_type((100,), str), '(100,)')
self.assertEqual(convert_value_type([100], basestring), '[100]')
self.assertEqual(convert_value_type(None, str), 'None')
# to int/float
self.assertEqual(convert_value_type('100', int), 100)
self.assertEqual(convert_value_type('0', int), 0)
self.assertEqual(convert_value_type('-123', int), -123)
self.assertEqual(convert_value_type('1.6', float), 1.6)
self.assertEqual(convert_value_type('5', float), 5.0)
self.assertErrorRegex(EasyBuildError, "Converting type of .* failed", convert_value_type, '', int)
# 1.6 can't be parsed as an int (yields "invalid literal for int() with base 10" error)
self.assertErrorRegex(EasyBuildError, "Converting type of .* failed", convert_value_type, '1.6', int)
# idempotency
self.assertEqual(convert_value_type('foo', basestring), 'foo')
self.assertEqual(convert_value_type('foo', str), 'foo')
self.assertEqual(convert_value_type(100, int), 100)
self.assertEqual(convert_value_type(1.6, float), 1.6)
# complex types
dep = [{'GCC': '1.2.3', 'versionsuffix': 'foo'}]
converted_dep = [{'name': 'GCC', 'version': '1.2.3', 'versionsuffix': 'foo'}]
self.assertEqual(convert_value_type(dep, DEPENDENCIES), converted_dep)
# no conversion function available for specific type
class Foo():
pass
self.assertErrorRegex(EasyBuildError, "No conversion function available", convert_value_type, None, Foo)
def test_to_name_version_dict(self):
""" Test toolchain string to dict conversion """
# normal cases
self.assertEqual(to_name_version_dict("intel, 2015a"), {'name': 'intel', 'version': '2015a'})
self.assertEqual(to_name_version_dict(('intel', '2015a')), {'name': 'intel', 'version': '2015a'})
self.assertEqual(to_name_version_dict(['gcc', '4.7']), {'name': 'gcc', 'version': '4.7'})
tc = {'name': 'intel', 'version': '2015a'}
self.assertEqual(to_name_version_dict(tc), tc)
# wrong type
self.assertErrorRegex(EasyBuildError, r"Conversion of .* \(type .*\) to name and version dict is not supported",
to_name_version_dict, 1000)
# wrong number of elements
errstr = "Can not convert .* to name and version .*. Expected 2 elements"
self.assertErrorRegex(EasyBuildError, errstr, to_name_version_dict, "intel, 2015, a")
self.assertErrorRegex(EasyBuildError, errstr, to_name_version_dict, "intel")
self.assertErrorRegex(EasyBuildError, errstr, to_name_version_dict, ['gcc', '4', '7'])
# missing keys
self.assertErrorRegex(EasyBuildError, "Incorrect set of keys", to_name_version_dict, {'name': 'intel'})
def test_to_dependency(self):
""" Test dependency dict to tuple conversion """
# normal cases
lib_dict = {
'name': 'lib',
'version': '1.2.8',
'toolchain': {'name': 'GCC', 'version': '4.8.2'},
}
self.assertEqual(to_dependency({'lib': '1.2.8'}), {'name': 'lib', 'version': '1.2.8'})
self.assertEqual(to_dependency({'lib': '1.2.8', 'toolchain': 'GCC, 4.8.2'}), lib_dict)
self.assertEqual(to_dependency({'lib': '1.2.8', 'toolchain': ['GCC', '4.8.2']}), lib_dict)
lib_dict.update({'versionsuffix': ''})
# to_dependency doesn't touch values of non-dict type
self.assertEqual(to_dependency(('foo', '1.3')), ('foo','1.3'))
self.assertEqual(to_dependency(('foo', '1.3', '-suff', ('GCC', '4.8.2'))), ('foo', '1.3', '-suff', ('GCC','4.8.2')))
self.assertEqual(to_dependency('foo/1.3'), 'foo/1.3')
self.assertEqual(to_dependency({'name':'fftw/3.3.4.2', 'external_module': True}),
{
'external_module': True,
'full_mod_name': 'fftw/3.3.4.2',
'name': None,
'short_mod_name': 'fftw/3.3.4.2',
'version': None,
})
foo_dict = {
'name': 'foo',
'version': '1.3',
'versionsuffix': '-bar',
}
self.assertEqual(to_dependency({'foo': '1.3', 'versionsuffix': '-bar'}), foo_dict)
foo_dict.update({'toolchain': {'name': 'GCC', 'version': '4.8.2'}})
self.assertEqual(to_dependency({'foo': '1.3', 'versionsuffix': '-bar', 'toolchain': 'GCC, 4.8.2'}), foo_dict)
# using 'name' and 'version' in dictionary being passed yields the expected result
foo_dict = {'name': 'foo', 'version': '1.2.3'}
self.assertEqual(to_dependency(foo_dict), foo_dict)
foo_dict.update({'toolchain': {'name': 'GCC', 'version': '4.8.2'}})
self.assertEqual(to_dependency({'name': 'foo', 'version': '1.2.3', 'toolchain': ['GCC', '4.8.2']}), foo_dict)
self.assertEqual(to_dependency(foo_dict), foo_dict)
# extra keys ruin it
foo_dict.update({'extra_key': 'bogus'})
self.assertErrorRegex(EasyBuildError, "Found unexpected \(key, value\) pair: .*", to_dependency, foo_dict)
# no name/version
self.assertErrorRegex(EasyBuildError, "Can not parse dependency without name and version: .*",
to_dependency, {'toolchain': 'lib, 1.2.8', 'versionsuffix': 'suff'})
# too many values
self.assertErrorRegex(EasyBuildError, "Found unexpected \(key, value\) pair: .*",
to_dependency, {'lib': '1.2.8', 'foo':'1.3', 'toolchain': 'lib, 1.2.8', 'versionsuffix': 'suff'})
def test_to_dependencies(self):
"""Test to_dependencies function."""
self.assertEqual(to_dependencies([]), [])
deps = [
'foo/1.2.3',
('foo', '1.2.3'),
('bar', '4.5.6', '-test'),
('foobar', '1.3.5', '', ('GCC', '4.7.2')),
{'toy': '0.0'},
{'toy': '0.0', 'versionsuffix': '-bleh'},
{'toy': '0.0', 'toolchain': 'gompi, 2015a'},
{'gzip': '1.5', 'versionsuffix': '', 'toolchain': 'foss, 2014b'},
{'name': 'toy', 'version': '0.0', 'versionsuffix': '-bleh',
'toolchain': {'name': 'gompi', 'version': '2015a'}},
]
self.assertEqual(to_dependencies(deps), [
'foo/1.2.3',
('foo', '1.2.3'),
('bar', '4.5.6', '-test'),
('foobar', '1.3.5', '', ('GCC','4.7.2')),
{'name': 'toy', 'version': '0.0'},
{'name': 'toy', 'version': '0.0', 'versionsuffix': '-bleh'},
{'name': 'toy', 'version': '0.0', 'toolchain': {'name': 'gompi', 'version': '2015a'}},
{'name': 'gzip', 'version': '1.5', 'versionsuffix': '',
'toolchain': {'name': 'foss', 'version': '2014b'}},
{'name': 'toy', 'version': '0.0', 'versionsuffix': '-bleh',
'toolchain': {'name': 'gompi', 'version': '2015a'}},
])
def test_is_value_of_type(self):
"""Test is_value_of_type function."""
self.assertTrue(is_value_of_type({'one': 1}, dict))
self.assertTrue(is_value_of_type(1, int))
self.assertTrue(is_value_of_type("foo", str))
self.assertTrue(is_value_of_type(['a', 'b'], list))
self.assertTrue(is_value_of_type(('a', 'b'), tuple))
self.assertFalse(is_value_of_type({'one': 1}, list))
self.assertFalse(is_value_of_type(1, str))
self.assertFalse(is_value_of_type("foo", int))
# toolchain type check
self.assertTrue(is_value_of_type({'name': 'intel', 'version': '2015a'}, NAME_VERSION_DICT))
# version value should be string, not int
self.assertFalse(is_value_of_type({'name': 'intel', 'version': 100}, NAME_VERSION_DICT))
# missing version key
self.assertFalse(is_value_of_type({'name': 'intel', 'foo': 'bar'}, NAME_VERSION_DICT))
# extra key, shouldn't be there
self.assertFalse(is_value_of_type({'name': 'intel', 'version': '2015a', 'foo': 'bar'}, NAME_VERSION_DICT))
# dependency type check
self.assertTrue(is_value_of_type({'name': 'intel', 'version': '2015a'}, DEPENDENCY_DICT))
self.assertTrue(is_value_of_type({
'name': 'intel',
'version': '2015a',
'toolchain': {'name': 'intel', 'version': '2015a'},
'versionsuffix': 'foo',
}, DEPENDENCY_DICT))
# no version key
self.assertFalse(is_value_of_type({'name': 'intel'}, NAME_VERSION_DICT))
# too many keys
self.assertFalse(is_value_of_type({
'name': 'intel',
'version': '2015a',
'toolchain': 'intel, 2015a',
'versionsuffix': 'foo',
'extra': 'bar',
}, DEPENDENCY_DICT))
# list of dependencies type check
dependencies = [
{'name': 'intel', 'version': '2015a'},
{'name': 'gcc', 'version': '4.1.3'},
{'name': 'dummy', 'version': 'dummy', 'versionsuffix': 'foo',
'toolchain': {'name': 'intel', 'version': '2015a'}},
]
self.assertTrue(is_value_of_type(dependencies, DEPENDENCIES))
# string value for toolchain key is not OK
dependencies.append({'name': 'foo', 'version': '1.2.3', 'toolchain': 'intel, 2015a'})
self.assertFalse(is_value_of_type(dependencies, DEPENDENCIES))
# wrong keys (name/version is strictly required)
self.assertFalse(is_value_of_type([{'a':'b', 'c':'d'}], DEPENDENCIES))
# not a list
self.assertFalse(is_value_of_type({'name': 'intel', 'version': '2015a'}, DEPENDENCIES))
# no extra keys allowed, only name/version/versionsuffix/toolchain
self.assertFalse(is_value_of_type({'name': 'intel', 'version': '2015a', 'foo': 'bar'}, DEPENDENCIES))
def test_as_hashable(self):
"""Test as_hashable function."""
hashable_value = (
('one', (1,)),
('two', (1,2)),
)
self.assertEqual(as_hashable({'one': [1], 'two': [1, 2]}), hashable_value)
hashable_value = (
('one', (
('two', (1, 2)),
),),
)
self.assertEqual(as_hashable({'one': {'two': [1, 2]}}), hashable_value)
def test_check_key_types(self):
"""Test check_key_types function."""
self.assertTrue(check_key_types({'name': 'intel', 'version': '2015a'}, [str]))
self.assertTrue(check_key_types({'one': 1, 2: 'two'}, (int, str)))
self.assertFalse(check_key_types({'name': 'intel', 'version': '2015a'}, []))
self.assertFalse(check_key_types({'name': 'intel', 'version': '2015a'}, (int,)))
self.assertFalse(check_key_types({'one': 1, 2: 'two'}, [str]))
def test_check_known_keys(self):
"""Test check_known_keys function."""
self.assertTrue(check_known_keys({'one': 1, 'two': 2}, ['one', 'two']))
self.assertTrue(check_known_keys({'one': 1, 'two': 2}, ('one', 'two', 'three')))
self.assertFalse(check_known_keys({'one': 1, 'two': 2}, ['one']))
known_keys = ['name', 'toolchain', 'version', 'versionsuffix']
self.assertTrue(check_known_keys({'name': 'intel', 'version': '2015a'}, known_keys))
self.assertTrue(check_known_keys({'name': 'intel', 'version': '2015a', 'versionsuffix': '-test'}, known_keys))
self.assertFalse(check_known_keys({'name': 'intel', 'version': '2015a', 'foo': 'bar'}, known_keys))
def test_check_required_keys(self):
"""Test check_required_keys function."""
self.assertTrue(check_required_keys({'one': 1, 'two': 2}, ['one', 'two']))
self.assertFalse(check_required_keys({'one': 1, 'two': 2}, ('one', 'two', 'three')))
self.assertTrue(check_required_keys({'one': 1, 'two': 2}, ['one']))
req_keys = ['name', 'version']
self.assertTrue(check_required_keys({'name': 'intel', 'version': '2015a'}, req_keys))
self.assertFalse(check_required_keys({'name': 'intel'}, req_keys))
self.assertTrue(check_required_keys({'name': 'foo', 'version': '1.2.3', 'versionsuffix': '-test'}, req_keys))
self.assertFalse(check_required_keys({'name': 'foo', 'versionsuffix': '-test'}, req_keys))
def test_check_element_types(self):
"""Test check_element_types function."""
# checking types of list elements
self.assertTrue(check_element_types(['one', 'two'], [str]))
self.assertTrue(check_element_types(['one', 'two'], [int, str]))
self.assertTrue(check_element_types(['one', 2], [int, str]))
self.assertFalse(check_element_types(['one', 2], [int]))
# checking types of dict values (simple list of allowed types)
self.assertTrue(check_element_types({'one': 1, 2: 'two'}, [int, str]))
self.assertFalse(check_element_types({'one': 1, 2: 'two'}, [str]))
self.assertFalse(check_element_types({'one': 1, 'two': None}, [str]))
# checking types of dict values (dict of allowed types)
self.assertTrue(check_element_types({'one': 1, 2: 'two'}, {'one': [int], 2: [str]}))
self.assertFalse(check_element_types({'one': 1, 2: 'two'}, {'one': [str], 2: [str]}))
self.assertTrue(check_element_types([], []))
self.assertTrue(check_element_types({}, []))
self.assertTrue(check_element_types({}, {}))
# if no (matching) allowed types are listed, check returns False
self.assertFalse(check_element_types({'one': 1}, []))
self.assertFalse(check_element_types({'one': 1}, {}))
self.assertFalse(check_element_types({'one': 1}, {'two': int}))
# errors
self.assertErrorRegex(EasyBuildError, "Don't know how to check element types .*", check_element_types, 1, [])
def suite():
    """Return a TestSuite containing all test cases from this module."""
    loader = TestLoader()
    return loader.loadTestsFromTestCase(TypeCheckingTest)
if __name__ == '__main__':
    # run the full test suite when this module is executed as a script
    main()
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <guido@python.org>"
import sys
from StringIO import StringIO  # Python 2 module (io.StringIO in Python 3)
HUGE = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs = {}  # cache: type number -> symbol name, filled lazily by type_repr()
def type_repr(type_num):
    """Return the symbolic name for *type_num*, or the number itself.

    The lookup table is built lazily from the grammar's symbol map the
    first time this is called, and cached in the module-level dict.
    """
    global _type_reprs
    if not _type_reprs:
        from .pygram import python_symbols
        # printing tokens is possible but not as useful
        # from .pgen2 import token // token.__dict__.items():
        for sym_name, sym_val in python_symbols.__dict__.items():
            if type(sym_val) == int:
                _type_reprs[sym_val] = sym_name
    return _type_reprs.setdefault(type_num, type_num)
class Base(object):
    """Abstract base class for Node and Leaf.
    This provides some default functionality and boilerplate using the
    template pattern.
    A node may be a subnode of at most one parent.
    """
    # Default values for instance variables
    type = None # int: token number (< 256) or symbol number (>= 256)
    parent = None # Parent node pointer, or None
    children = () # Tuple of subnodes
    was_changed = False
    def __new__(cls, *args, **kwds):
        """Constructor that prevents Base from being instantiated."""
        assert cls is not Base, "Cannot instantiate Base"
        return object.__new__(cls)
    def __eq__(self, other):
        """Compares two nodes for equality.
        This calls the method _eq().
        """
        # Different concrete classes are never equal; NotImplemented lets
        # Python try the reflected comparison.
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)
    def __ne__(self, other):
        """Compares two nodes for inequality.
        This calls the method _eq().
        """
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self._eq(other)
    def _eq(self, other):
        """Compares two nodes for equality.
        This is called by __eq__ and __ne__. It is only called if the
        two nodes have the same type. This must be implemented by the
        concrete subclass. Nodes should be considered equal if they
        have the same structure, ignoring the prefix string and other
        context information.
        """
        raise NotImplementedError
    def clone(self):
        """Returns a cloned (deep) copy of self.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def post_order(self):
        """Returns a post-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def pre_order(self):
        """Returns a pre-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def set_prefix(self, prefix):
        """Sets the prefix for the node (see Leaf class).
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def get_prefix(self):
        """Returns the prefix for the node (see Leaf class).
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def replace(self, new):
        """Replaces this node with a new one in the parent."""
        # `new` may be a single node or a list of nodes to splice in.
        assert self.parent is not None, str(self)
        assert new is not None
        if not isinstance(new, list):
            new = [new]
        l_children = []
        found = False
        for ch in self.parent.children:
            if ch is self:
                assert not found, (self.parent.children, self, new)
                if new is not None:
                    l_children.extend(new)
                found = True
            else:
                l_children.append(ch)
        assert found, (self.children, self, new)
        self.parent.changed()
        self.parent.children = l_children
        for x in new:
            x.parent = self.parent
        # This node is no longer part of the tree.
        self.parent = None
    def get_lineno(self):
        """Returns the line number which generated the invocant node."""
        # Walk down the leftmost spine until a Leaf (which carries lineno)
        # is found; returns None for a childless non-leaf node.
        node = self
        while not isinstance(node, Leaf):
            if not node.children:
                return
            node = node.children[0]
        return node.lineno
    def changed(self):
        """Marks this node and all its ancestors as changed."""
        if self.parent:
            self.parent.changed()
        self.was_changed = True
    def remove(self):
        """Remove the node from the tree. Returns the position of the node
        in its parent's children before it was removed."""
        # Returns None (implicitly) when the node has no parent.
        if self.parent:
            for i, node in enumerate(self.parent.children):
                if node is self:
                    self.parent.changed()
                    del self.parent.children[i]
                    self.parent = None
                    return i
    def get_next_sibling(self):
        """Return the node immediately following the invocant in their
        parent's children list. If the invocant does not have a next
        sibling, return None."""
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                try:
                    return self.parent.children[i+1]
                except IndexError:
                    return None
    def get_prev_sibling(self):
        """Return the node immediately preceding the invocant in their
        parent's children list. If the invocant does not have a previous
        sibling, return None."""
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                if i == 0:
                    return None
                return self.parent.children[i-1]
    def get_suffix(self):
        """Return the string immediately following the invocant node. This
        is effectively equivalent to node.get_next_sibling().get_prefix()"""
        next_sib = self.get_next_sibling()
        if next_sib is None:
            return ""
        return next_sib.get_prefix()
class Node(Base):

    """Concrete implementation for interior nodes."""

    def __init__(self, type, children, context=None, prefix=None):
        """Initializer.

        Takes a type constant (a symbol number >= 256), a sequence of
        child nodes, and an optional context keyword argument.

        As a side effect, the parent pointers of the children are updated.
        """
        assert type >= 256, type
        self.type = type
        self.children = list(children)
        for ch in self.children:
            # A node may be a subnode of at most one parent.
            assert ch.parent is None, repr(ch)
            ch.parent = self
        if prefix is not None:
            self.set_prefix(prefix)

    def __repr__(self):
        """Returns a canonical string representation."""
        return "%s(%s, %r)" % (self.__class__.__name__,
                               type_repr(self.type),
                               self.children)

    def __str__(self):
        """Returns a pretty string representation.

        This reproduces the input source exactly.
        """
        return "".join(map(str, self.children))

    def _eq(self, other):
        """Compares two nodes for equality."""
        return (self.type, self.children) == (other.type, other.children)

    def clone(self):
        """Returns a cloned (deep) copy of self."""
        return Node(self.type, [ch.clone() for ch in self.children])

    def post_order(self):
        """Returns a post-order iterator for the tree."""
        for child in self.children:
            for node in child.post_order():
                yield node
        yield self

    def pre_order(self):
        """Returns a pre-order iterator for the tree."""
        yield self
        for child in self.children:
            # BUGFIX: recurse with pre_order(), not post_order().  The old
            # code yielded each subtree in post-order after the root, so the
            # resulting sequence was neither a pre- nor a post-order walk.
            for node in child.pre_order():
                yield node

    def set_prefix(self, prefix):
        """Sets the prefix for the node.

        This passes the responsibility on to the first child.
        """
        if self.children:
            self.children[0].set_prefix(prefix)

    def get_prefix(self):
        """Returns the prefix for the node.

        This passes the call on to the first child.
        """
        if not self.children:
            return ""
        return self.children[0].get_prefix()

    def set_child(self, i, child):
        """Equivalent to 'node.children[i] = child'. This method also sets the
        child's parent attribute appropriately."""
        child.parent = self
        self.children[i].parent = None
        self.children[i] = child
        self.changed()

    def insert_child(self, i, child):
        """Equivalent to 'node.children.insert(i, child)'. This method also
        sets the child's parent attribute appropriately."""
        child.parent = self
        self.children.insert(i, child)
        self.changed()

    def append_child(self, child):
        """Equivalent to 'node.children.append(child)'. This method also
        sets the child's parent attribute appropriately."""
        child.parent = self
        self.children.append(child)
        self.changed()
class Leaf(Base):
    """Concrete implementation for leaf nodes."""
    # Default values for instance variables
    prefix = "" # Whitespace and comments preceding this token in the input
    lineno = 0 # Line where this token starts in the input
    column = 0 # Column where this token starts in the input
    def __init__(self, type, value, context=None, prefix=None):
        """Initializer.
        Takes a type constant (a token number < 256), a string value,
        and an optional context keyword argument.
        The context, if given, is a (prefix, (lineno, column)) pair;
        an explicit prefix argument overrides the context's prefix.
        """
        assert 0 <= type < 256, type
        if context is not None:
            self.prefix, (self.lineno, self.column) = context
        self.type = type
        self.value = value
        if prefix is not None:
            self.prefix = prefix
    def __repr__(self):
        """Returns a canonical string representation."""
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.type,
                               self.value)
    def __str__(self):
        """Returns a pretty string representation.
        This reproduces the input source exactly.
        """
        return self.prefix + str(self.value)
    def _eq(self, other):
        """Compares two nodes for equality."""
        return (self.type, self.value) == (other.type, other.value)
    def clone(self):
        """Returns a cloned (deep) copy of self."""
        return Leaf(self.type, self.value,
                    (self.prefix, (self.lineno, self.column)))
    def post_order(self):
        """Returns a post-order iterator for the tree."""
        yield self
    def pre_order(self):
        """Returns a pre-order iterator for the tree."""
        yield self
    def set_prefix(self, prefix):
        """Sets the prefix for the node."""
        self.changed()
        self.prefix = prefix
    def get_prefix(self):
        """Returns the prefix for the node."""
        return self.prefix
def convert(gr, raw_node):
    """Convert raw parser output into a Node or Leaf instance.

    The parser driver calls this whenever a reduction of a grammar rule
    produces a new complete node, so the tree is built strictly bottom-up.
    """
    node_type, node_value, node_context, node_children = raw_node
    if not node_children and node_type not in gr.number2symbol:
        return Leaf(node_type, node_value, context=node_context)
    # If there's exactly one child, return that child instead of
    # creating a new node.
    if len(node_children) == 1:
        return node_children[0]
    return Node(node_type, node_children, context=node_context)
class BasePattern(object):
    """A pattern is a tree matching pattern.
    It looks for a specific node type (token or symbol), and
    optionally for a specific content.
    This is an abstract base class. There are three concrete
    subclasses:
    - LeafPattern matches a single leaf node;
    - NodePattern matches a single node (usually non-leaf);
    - WildcardPattern matches a sequence of nodes of variable length.
    """
    # Defaults for instance variables
    type = None # Node type (token if < 256, symbol if >= 256)
    content = None # Optional content matching pattern
    name = None # Optional name used to store match in results dict
    def __new__(cls, *args, **kwds):
        """Constructor that prevents BasePattern from being instantiated."""
        assert cls is not BasePattern, "Cannot instantiate BasePattern"
        return object.__new__(cls)
    def __repr__(self):
        args = [type_repr(self.type), self.content, self.name]
        # Drop trailing None arguments for a more compact repr.
        while args and args[-1] is None:
            del args[-1]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
    def optimize(self):
        """A subclass can define this as a hook for optimizations.
        Returns either self or another node with the same effect.
        """
        return self
    def match(self, node, results=None):
        """Does this pattern exactly match a node?
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        Default implementation for non-wildcard patterns.
        """
        if self.type is not None and node.type != self.type:
            return False
        if self.content is not None:
            # Collect submatches in a scratch dict so that a failed
            # _submatch doesn't pollute the caller's results.
            r = None
            if results is not None:
                r = {}
            if not self._submatch(node, r):
                return False
            if r:
                results.update(r)
        if results is not None and self.name:
            results[self.name] = node
        return True
    def match_seq(self, nodes, results=None):
        """Does this pattern exactly match a sequence of nodes?
        Default implementation for non-wildcard patterns.
        """
        # A non-wildcard pattern matches exactly one node.
        if len(nodes) != 1:
            return False
        return self.match(nodes[0], results)
    def generate_matches(self, nodes):
        """Generator yielding all matches for this pattern.
        Default implementation for non-wildcard patterns.
        """
        r = {}
        if nodes and self.match(nodes[0], r):
            yield 1, r
class LeafPattern(BasePattern):

    """Pattern matching a single Leaf (token) node, optionally by value."""

    def __init__(self, type=None, content=None, name=None):
        """Initializer.  Takes optional type, content, and name.

        The type, if given, must be a token type (< 256).  If omitted,
        any *leaf* node is matched; the content may still be required.

        The content, if given, must be a string that the leaf's value
        must equal exactly.

        If a name is given, the matching node is stored in the results
        dict under that key.
        """
        if type is not None:
            assert 0 <= type < 256, type
        if content is not None:
            assert isinstance(content, basestring), repr(content)
        self.type = type
        self.content = content
        self.name = name

    def match(self, node, results=None):
        """Override match() to insist on a leaf node."""
        if not isinstance(node, Leaf):
            # Interior nodes can never satisfy a leaf pattern.
            return False
        return BasePattern.match(self, node, results)

    def _submatch(self, node, results=None):
        """Match the pattern's content against the leaf's value.

        This assumes the node type matches and self.content is not None.
        Returns True if it matches, False if not.  The results dict is
        never modified here, since a leaf has no named subpatterns.
        """
        return node.value == self.content
class NodePattern(BasePattern):
    # True when any subpattern in content is a WildcardPattern (set in __init__);
    # selects the generate_matches-based matching strategy in _submatch.
    wildcards = False
    def __init__(self, type=None, content=None, name=None):
        """Initializer. Takes optional type, content, and name.
        The type, if given, must be a symbol type (>= 256). If the
        type is None this matches *any* single node (leaf or not),
        except if content is not None, in which it only matches
        non-leaf nodes that also match the content pattern.
        The content, if not None, must be a sequence of Patterns that
        must match the node's children exactly. If the content is
        given, the type must not be None.
        If a name is given, the matching node is stored in the results
        dict under that key.
        """
        if type is not None:
            assert type >= 256, type
        if content is not None:
            assert not isinstance(content, basestring), repr(content)
            content = list(content)
            for i, item in enumerate(content):
                assert isinstance(item, BasePattern), (i, item)
                if isinstance(item, WildcardPattern):
                    self.wildcards = True
        self.type = type
        self.content = content
        self.name = name
    def _submatch(self, node, results=None):
        """Match the pattern's content to the node's children.
        This assumes the node type matches and self.content is not None.
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        When returning False, the results dict may still be updated.
        """
        if self.wildcards:
            # Wildcards require the full combinatorial matcher; accept only
            # matches that consume all children.
            for c, r in generate_matches(self.content, node.children):
                if c == len(node.children):
                    if results is not None:
                        results.update(r)
                    return True
            return False
        # Without wildcards, children and subpatterns must pair up 1:1.
        if len(self.content) != len(node.children):
            return False
        for subpattern, child in zip(self.content, node.children):
            if not subpattern.match(child, results):
                return False
        return True
class WildcardPattern(BasePattern):
    """A wildcard pattern can match zero or more nodes.
    This has all the flexibility needed to implement patterns like:
    .* .+ .? .{m,n}
    (a b c | d e | f)
    (...)* (...)+ (...)? (...){m,n}
    except it always uses non-greedy matching.
    """
    def __init__(self, content=None, min=0, max=HUGE, name=None):
        """Initializer.
        Args:
            content: optional sequence of subsequences of patterns;
                     if absent, matches one node;
                     if present, each subsequence is an alternative [*]
            min: optional minimum number of times to match, default 0
            max: optional maximum number of times to match, default HUGE
            name: optional name assigned to this match
        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
            equivalent to (a b c | d e | f g h); if content is None,
            this is equivalent to '.' in regular expression terms.
        The min and max parameters work as follows:
            min=0, max=maxint: .*
            min=1, max=maxint: .+
            min=0, max=1: .?
            min=1, max=1: .
        If content is not None, replace the dot with the parenthesized
        list of alternatives, e.g. (a b c | d e | f g h)*
        """
        assert 0 <= min <= max <= HUGE, (min, max)
        if content is not None:
            content = tuple(map(tuple, content)) # Protect against alterations
            # Check sanity of alternatives
            assert len(content), repr(content) # Can't have zero alternatives
            for alt in content:
                assert len(alt), repr(alt) # Can't have empty alternatives
        self.content = content
        self.min = min
        self.max = max
        self.name = name
    def optimize(self):
        """Optimize certain stacked wildcard patterns."""
        subpattern = None
        if (self.content is not None and
            len(self.content) == 1 and len(self.content[0]) == 1):
            subpattern = self.content[0][0]
        if self.min == 1 and self.max == 1:
            # Exactly-once wildcard degenerates to a plain node pattern.
            if self.content is None:
                return NodePattern(name=self.name)
            if subpattern is not None and  self.name == subpattern.name:
                return subpattern.optimize()
        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
            subpattern.min <= 1 and self.name == subpattern.name):
            # Collapse nested single-alternative wildcards into one.
            return WildcardPattern(subpattern.content,
                                   self.min*subpattern.min,
                                   self.max*subpattern.max,
                                   subpattern.name)
        return self
    def match(self, node, results=None):
        """Does this pattern exactly match a node?"""
        return self.match_seq([node], results)
    def match_seq(self, nodes, results=None):
        """Does this pattern exactly match a sequence of nodes?"""
        for c, r in self.generate_matches(nodes):
            if c == len(nodes):
                if results is not None:
                    results.update(r)
                    if self.name:
                        results[self.name] = list(nodes)
                return True
        return False
    def generate_matches(self, nodes):
        """Generator yielding matches for a sequence of nodes.
        Args:
            nodes: sequence of nodes
        Yields:
            (count, results) tuples where:
            count: the match comprises nodes[:count];
            results: dict containing named submatches.
        """
        if self.content is None:
            # Shortcut for special case (see __init__.__doc__)
            for count in xrange(self.min, 1 + min(len(nodes), self.max)):
                r = {}
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        elif self.name == "bare_name":
            yield self._bare_name_matches(nodes)
        else:
            # The reason for this is that hitting the recursion limit usually
            # results in some ugly messages about how RuntimeErrors are being
            # ignored.
            save_stderr = sys.stderr
            sys.stderr = StringIO()
            try:
                for count, r in self._recursive_matches(nodes, 0):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            except RuntimeError:
                # We fall back to the iterative pattern matching scheme if the recursive
                # scheme hits the recursion limit.
                for count, r in self._iterative_matches(nodes):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            finally:
                sys.stderr = save_stderr
    def _iterative_matches(self, nodes):
        """Helper to iteratively yield the matches."""
        nodelen = len(nodes)
        if 0 >= self.min:
            # Zero repetitions are allowed, so the empty match qualifies.
            yield 0, {}
        results = []
        # generate matches that use just one alt from self.content
        for alt in self.content:
            for c, r in generate_matches(alt, nodes):
                yield c, r
                results.append((c, r))
        # for each match, iterate down the nodes
        while results:
            new_results = []
            for c0, r0 in results:
                # stop if the entire set of nodes has been matched
                if c0 < nodelen and c0 <= self.max:
                    for alt in self.content:
                        for c1, r1 in generate_matches(alt, nodes[c0:]):
                            if c1 > 0:
                                r = {}
                                r.update(r0)
                                r.update(r1)
                                yield c0 + c1, r
                                new_results.append((c0 + c1, r))
            results = new_results
    def _bare_name_matches(self, nodes):
        """Special optimized matcher for bare_name."""
        count = 0
        r = {}
        done = False
        max = len(nodes)  # NOTE: shadows the builtin max(); local to this method
        while not done and count < max:
            done = True
            for leaf in self.content:
                if leaf[0].match(nodes[count], r):
                    count += 1
                    done = False
                    break
        r[self.name] = nodes[:count]
        return count, r
    def _recursive_matches(self, nodes, count):
        """Helper to recursively yield the matches."""
        assert self.content is not None
        if count >= self.min:
            yield 0, {}
        if count < self.max:
            for alt in self.content:
                for c0, r0 in generate_matches(alt, nodes):
                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
                        r = {}
                        r.update(r0)
                        r.update(r1)
                        yield c0 + c1, r
class NegatedPattern(BasePattern):

    def __init__(self, content=None):
        """Initializer.

        The argument is either a pattern or None.  If it is None, this
        only matches an empty sequence (effectively '$' in regex
        lingo).  If it is not None, this matches whenever the argument
        pattern doesn't have any matches.
        """
        if content is not None:
            assert isinstance(content, BasePattern), repr(content)
        self.content = content

    def match(self, node, results=None):
        # BUGFIX: accept the optional `results` argument like every other
        # pattern class, so generic callers that pass a results dict don't
        # crash with TypeError.  We never match a node in its entirety.
        return False

    def match_seq(self, nodes, results=None):
        # BUGFIX: same signature fix as match() above.
        # We only match an empty sequence of nodes in its entirety.
        return len(nodes) == 0

    def generate_matches(self, nodes):
        """Yield (0, {}) exactly when the negated content does not match."""
        if self.content is None:
            # Return a match if there is an empty sequence
            if len(nodes) == 0:
                yield 0, {}
        else:
            # Return a match if the argument pattern has no matches
            for c, r in self.content.generate_matches(nodes):
                return
            yield 0, {}
def generate_matches(patterns, nodes):
    """Generator yielding matches for a sequence of patterns and nodes.

    Args:
        patterns: a sequence of patterns
        nodes: a sequence of nodes

    Yields:
        (count, results) tuples where:
        count: the entire sequence of patterns matches nodes[:count];
        results: dict containing named submatches.
    """
    if not patterns:
        # No patterns left: the empty prefix always matches.
        yield 0, {}
        return
    head, tail = patterns[0], patterns[1:]
    for head_count, head_results in head.generate_matches(nodes):
        if not tail:
            yield head_count, head_results
            continue
        # Recurse on the remaining patterns over the unconsumed nodes and
        # merge bindings; tail bindings win on key collisions.
        for tail_count, tail_results in generate_matches(tail, nodes[head_count:]):
            merged = dict(head_results)
            merged.update(tail_results)
            yield head_count + tail_count, merged
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import pickle
import tempfile
import unittest
import warnings
import test.test_support
import test.string_tests
import test.buffer_tests
class Indexable:
    """Wraps an integer and exposes it only through the __index__ protocol.

    Used by the tests below to verify that byte values may be supplied
    indirectly (e.g. in constructors, item assignment, append/insert).
    """
    def __init__(self, value=0):
        self.value = value
    def __index__(self):
        # Invoked wherever an integer index/value is required.
        return self.value
class BaseBytesTest(unittest.TestCase):
def setUp(self):
self.warning_filters = warnings.filters[:]
def tearDown(self):
warnings.filters = self.warning_filters
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxint])
self.assertRaises(IndexError, lambda: b[sys.maxint+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxint])
self.assertRaises(IndexError, lambda: b[-sys.maxint-1])
self.assertRaises(IndexError, lambda: b[-sys.maxint-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
# allowed in 2.x
#self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxint])
self.assertRaises(ValueError, self.type2test, [sys.maxint+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
def test_compare_to_str(self):
warnings.simplefilter('ignore', BytesWarning)
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character sizes
self.assertEqual(self.type2test(b"\0a\0b\0c") == u"abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == u"abc", False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == u"abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == u"abc", False)
self.assertEqual(self.type2test() == unicode(), False)
self.assertEqual(self.type2test() != unicode(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = u"Hello world\n\u1234\u5678\u9abc\udef0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
b = self.type2test(sample, "latin1", "ignore")
self.assertEqual(b, self.type2test(sample[:-4], "utf-8"))
def test_decode(self):
sample = u"Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = u"Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
"Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + u"def")
self.assertRaises(TypeError, lambda: u"abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
self.assertRaises((OverflowError, MemoryError),
lambda: b * sys.maxsize)
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: u"a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEquals(self.type2test.fromhex(u''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEquals(self.type2test.fromhex(u'1a2B30'), b)
self.assertEquals(self.type2test.fromhex(u' 1A 2B 30 '), b)
self.assertEquals(self.type2test.fromhex(u'0000'), b'\0\0')
self.assertRaises(ValueError, self.type2test.fromhex, u'a')
self.assertRaises(ValueError, self.type2test.fromhex, u'rt')
self.assertRaises(ValueError, self.type2test.fromhex, u'1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, u'\x00')
self.assertRaises(ValueError, self.type2test.fromhex, u'12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, u' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
class ByteArrayTest(BaseBytesTest):
    """bytearray-specific tests: mutation, resizing, and the buffer API.

    Inherits the shared read-only behavior checks from BaseBytesTest.
    Note: the deprecated assertEquals alias has been normalized to
    assertEqual throughout, matching the rest of this module.
    """
    type2test = bytearray

    def test_nohash(self):
        # bytearray is mutable and therefore unhashable.
        self.assertRaises(TypeError, hash, bytearray())

    def test_bytearray_api(self):
        short_sample = b"Hello world\n"
        sample = short_sample + b"\0"*(20 - len(short_sample))
        tfn = tempfile.mktemp()
        try:
            # Prepare
            with open(tfn, "wb") as f:
                f.write(short_sample)
            # Test readinto
            with open(tfn, "rb") as f:
                b = bytearray(20)
                n = f.readinto(b)
            self.assertEqual(n, len(short_sample))
            # Python 2.x
            b_sample = (ord(s) for s in sample)
            self.assertEqual(list(b), list(b_sample))
            # Test writing in binary mode
            with open(tfn, "wb") as f:
                f.write(b)
            with open(tfn, "rb") as f:
                self.assertEqual(f.read(), sample)
            # Text mode is ambiguous; don't test
        finally:
            try:
                os.remove(tfn)
            except os.error:
                pass

    def test_reverse(self):
        b = bytearray(b'hello')
        self.assertEqual(b.reverse(), None)
        self.assertEqual(b, b'olleh')
        b = bytearray(b'hello1') # test even number of items
        b.reverse()
        self.assertEqual(b, b'1olleh')
        b = bytearray()
        b.reverse()
        self.assertFalse(b)

    def test_regexps(self):
        def by(s):
            return bytearray(map(ord, s))
        b = by("Hello, world")
        self.assertEqual(re.findall(r"\w+", b), [by("Hello"), by("world")])

    def test_setitem(self):
        b = bytearray([1, 2, 3])
        b[1] = 100
        self.assertEqual(b, bytearray([1, 100, 3]))
        b[-1] = 200
        self.assertEqual(b, bytearray([1, 100, 200]))
        b[0] = Indexable(10)
        self.assertEqual(b, bytearray([10, 100, 200]))
        try:
            b[3] = 0
            self.fail("Didn't raise IndexError")
        except IndexError:
            pass
        try:
            b[-10] = 0
            self.fail("Didn't raise IndexError")
        except IndexError:
            pass
        try:
            b[0] = 256
            self.fail("Didn't raise ValueError")
        except ValueError:
            pass
        try:
            b[0] = Indexable(-1)
            self.fail("Didn't raise ValueError")
        except ValueError:
            pass
        try:
            b[0] = None
            self.fail("Didn't raise TypeError")
        except TypeError:
            pass

    def test_delitem(self):
        b = bytearray(range(10))
        del b[0]
        self.assertEqual(b, bytearray(range(1, 10)))
        del b[-1]
        self.assertEqual(b, bytearray(range(1, 9)))
        del b[4]
        self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))

    def test_setslice(self):
        b = bytearray(range(10))
        self.assertEqual(list(b), list(range(10)))
        b[0:5] = bytearray([1, 1, 1, 1, 1])
        self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
        del b[0:-5]
        self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
        b[0:0] = bytearray([0, 1, 2, 3, 4])
        self.assertEqual(b, bytearray(range(10)))
        b[-7:-3] = bytearray([100, 101])
        self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
        b[3:5] = [3, 4, 5, 6]
        self.assertEqual(b, bytearray(range(10)))
        b[3:0] = [42, 42, 42]
        self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))

    def test_extended_set_del_slice(self):
        indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
        for start in indices:
            for stop in indices:
                # Skip invalid step 0
                for step in indices[1:]:
                    L = list(range(255))
                    b = bytearray(L)
                    # Make sure we have a slice of exactly the right length,
                    # but with different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    b[start:stop:step] = data
                    self.assertEqual(b, bytearray(L))
                    del L[start:stop:step]
                    del b[start:stop:step]
                    self.assertEqual(b, bytearray(L))

    def test_setslice_trap(self):
        # This test verifies that we correctly handle assigning self
        # to a slice of self (the old Lambert Meertens trap).
        b = bytearray(range(256))
        b[8:] = b
        self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))

    def test_iconcat(self):
        b = bytearray(b"abc")
        b1 = b
        b += b"def"
        self.assertEqual(b, b"abcdef")
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)
        b += b"xyz"
        self.assertEqual(b, b"abcdefxyz")
        try:
            b += u""
        except TypeError:
            pass
        else:
            self.fail("bytes += unicode didn't raise TypeError")

    def test_irepeat(self):
        b = bytearray(b"abc")
        b1 = b
        b *= 3
        self.assertEqual(b, b"abcabcabc")
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)

    def test_irepeat_1char(self):
        b = bytearray(b"x")
        b1 = b
        b *= 100
        self.assertEqual(b, b"x"*100)
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)

    def test_alloc(self):
        b = bytearray()
        alloc = b.__alloc__()
        self.assertTrue(alloc >= 0)
        seq = [alloc]
        for i in range(100):
            b += b"x"
            alloc = b.__alloc__()
            self.assertTrue(alloc >= len(b))
            if alloc not in seq:
                seq.append(alloc)

    def test_extend(self):
        orig = b'hello'
        a = bytearray(orig)
        a.extend(a)
        self.assertEqual(a, orig + orig)
        self.assertEqual(a[5:], orig)
        a = bytearray(b'')
        # Test iterators that don't have a __length_hint__
        a.extend(map(ord, orig * 25))
        a.extend(ord(x) for x in orig * 25)
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(iter(map(ord, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(list(map(ord, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
        self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
        self.assertEqual(len(a), 0)
        a = bytearray(b'')
        a.extend([Indexable(ord('a'))])
        self.assertEqual(a, b'a')

    def test_remove(self):
        b = bytearray(b'hello')
        b.remove(ord('l'))
        self.assertEqual(b, b'helo')
        b.remove(ord('l'))
        self.assertEqual(b, b'heo')
        self.assertRaises(ValueError, lambda: b.remove(ord('l')))
        self.assertRaises(ValueError, lambda: b.remove(400))
        self.assertRaises(TypeError, lambda: b.remove(u'e'))
        # remove first and last
        b.remove(ord('o'))
        b.remove(ord('h'))
        self.assertEqual(b, b'e')
        self.assertRaises(TypeError, lambda: b.remove(u'e'))
        b.remove(Indexable(ord('e')))
        self.assertEqual(b, b'')

    def test_pop(self):
        b = bytearray(b'world')
        self.assertEqual(b.pop(), ord('d'))
        self.assertEqual(b.pop(0), ord('w'))
        self.assertEqual(b.pop(-2), ord('r'))
        self.assertRaises(IndexError, lambda: b.pop(10))
        self.assertRaises(OverflowError, lambda: bytearray().pop())
        # test for issue #6846
        self.assertEqual(bytearray(b'\xff').pop(), 0xff)

    def test_nosort(self):
        self.assertRaises(AttributeError, lambda: bytearray().sort())

    def test_append(self):
        b = bytearray(b'hell')
        b.append(ord('o'))
        self.assertEqual(b, b'hello')
        self.assertEqual(b.append(100), None)
        b = bytearray()
        b.append(ord('A'))
        self.assertEqual(len(b), 1)
        self.assertRaises(TypeError, lambda: b.append(u'o'))
        b = bytearray()
        b.append(Indexable(ord('A')))
        self.assertEqual(b, b'A')

    def test_insert(self):
        b = bytearray(b'msssspp')
        b.insert(1, ord('i'))
        b.insert(4, ord('i'))
        b.insert(-2, ord('i'))
        b.insert(1000, ord('i'))
        self.assertEqual(b, b'mississippi')
        # allowed in 2.x
        #self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
        b = bytearray()
        b.insert(0, Indexable(ord('A')))
        self.assertEqual(b, b'A')

    def test_copied(self):
        # Issue 4348.  Make sure that operations that don't mutate the array
        # copy the bytes.
        b = bytearray(b'abc')
        self.assertFalse(b is b.replace(b'abc', b'cde', 0))
        t = bytearray([i for i in range(256)])
        x = bytearray(b'')
        self.assertFalse(x is x.translate(t))

    def test_partition_bytearray_doesnt_share_nullstring(self):
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        # Same for rpartition
        b, c, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        c, b, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")

    def test_resize_forbidden(self):
        # #4509: can't resize a bytearray when there are buffer exports, even
        # if it wouldn't reallocate the underlying buffer.
        # Furthermore, no destructive changes to the buffer may be applied
        # before raising the error.
        b = bytearray(range(10))
        v = memoryview(b)
        def resize(n):
            b[1:-1] = range(n + 1, 2*n - 1)
        resize(10)
        orig = b[:]
        self.assertRaises(BufferError, resize, 11)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 9)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 0)
        self.assertEqual(b, orig)
        # Other operations implying resize
        self.assertRaises(BufferError, b.pop, 0)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, b.remove, b[1])
        self.assertEqual(b, orig)
        def delitem():
            del b[1]
        self.assertRaises(BufferError, delitem)
        self.assertEqual(b, orig)
        # deleting a non-contiguous slice
        def delslice():
            b[1:-1:2] = b""
        self.assertRaises(BufferError, delslice)
        self.assertEqual(b, orig)

    def test_empty_bytearray(self):
        # Issue #7561: operations on empty bytearrays could crash in many
        # situations, due to a fragile implementation of the
        # PyByteArray_AS_STRING() C macro.
        self.assertRaises(ValueError, int, bytearray(b''))
class AssortedBytesTest(unittest.TestCase):
    """Tests for various combinations of bytes and bytearray.

    NOTE(review): this class appears in the disabled calls in test_main()
    below; some assertions (e.g. test_repr_str expecting "b'abc'") look
    like un-ported 3.x expectations -- confirm before enabling.
    """
    #
    # Test various combinations of bytes and bytearray
    #
    def setUp(self):
        # Snapshot the warnings filters; tests here ignore BytesWarning.
        self.warning_filters = warnings.filters[:]

    def tearDown(self):
        # Restore the filter list captured in setUp.
        warnings.filters = self.warning_filters

    def test_repr_str(self):
        warnings.simplefilter('ignore', BytesWarning)
        for f in str, repr:
            self.assertEqual(f(bytearray()), "bytearray(b'')")
            self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
            self.assertEqual(f(bytearray([0, 1, 254, 255])),
                             "bytearray(b'\\x00\\x01\\xfe\\xff')")
            self.assertEqual(f(b"abc"), "b'abc'")
            self.assertEqual(f(b"'"), '''b"'"''') # '''
            self.assertEqual(f(b"'\""), r"""b'\'"'""") # '

    def test_compare_bytes_to_bytearray(self):
        # Every comparison operator, in both orders, for equal/unequal values.
        self.assertEqual(b"abc" == bytes(b"abc"), True)
        self.assertEqual(b"ab" != bytes(b"abc"), True)
        self.assertEqual(b"ab" <= bytes(b"abc"), True)
        self.assertEqual(b"ab" < bytes(b"abc"), True)
        self.assertEqual(b"abc" >= bytes(b"ab"), True)
        self.assertEqual(b"abc" > bytes(b"ab"), True)
        self.assertEqual(b"abc" != bytes(b"abc"), False)
        self.assertEqual(b"ab" == bytes(b"abc"), False)
        self.assertEqual(b"ab" > bytes(b"abc"), False)
        self.assertEqual(b"ab" >= bytes(b"abc"), False)
        self.assertEqual(b"abc" < bytes(b"ab"), False)
        self.assertEqual(b"abc" <= bytes(b"ab"), False)
        self.assertEqual(bytes(b"abc") == b"abc", True)
        self.assertEqual(bytes(b"ab") != b"abc", True)
        self.assertEqual(bytes(b"ab") <= b"abc", True)
        self.assertEqual(bytes(b"ab") < b"abc", True)
        self.assertEqual(bytes(b"abc") >= b"ab", True)
        self.assertEqual(bytes(b"abc") > b"ab", True)
        self.assertEqual(bytes(b"abc") != b"abc", False)
        self.assertEqual(bytes(b"ab") == b"abc", False)
        self.assertEqual(bytes(b"ab") > b"abc", False)
        self.assertEqual(bytes(b"ab") >= b"abc", False)
        self.assertEqual(bytes(b"abc") < b"ab", False)
        self.assertEqual(bytes(b"abc") <= b"ab", False)

    def test_doc(self):
        self.assertIsNotNone(bytearray.__doc__)
        self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
        self.assertIsNotNone(bytes.__doc__)
        self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)

    def test_from_bytearray(self):
        # Construction from a memoryview over a bytes object.
        sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
        buf = memoryview(sample)
        b = bytearray(buf)
        self.assertEqual(b, bytearray(sample))

    def test_to_str(self):
        warnings.simplefilter('ignore', BytesWarning)
        self.assertEqual(str(b''), "b''")
        self.assertEqual(str(b'x'), "b'x'")
        self.assertEqual(str(b'\x80'), "b'\\x80'")
        self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
        self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
        self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")

    def test_literal(self):
        tests = [
            (b"Wonderful spam", "Wonderful spam"),
            (br"Wonderful spam too", "Wonderful spam too"),
            (b"\xaa\x00\000\200", "\xaa\x00\000\200"),
            (br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
        ]
        for b, s in tests:
            self.assertEqual(b, bytearray(s, 'latin-1'))
        # Non-ASCII characters are not allowed inside a bytes literal.
        for c in range(128, 256):
            self.assertRaises(SyntaxError, eval,
                              'b"%s"' % chr(c))

    def test_translate(self):
        b = b'hello'
        ba = bytearray(b)
        # Identity table except 'o' -> 'e'; deletechars removes 'l'.
        rosetta = bytearray(range(0, 256))
        rosetta[ord('o')] = ord('e')
        c = b.translate(rosetta, b'l')
        self.assertEqual(b, b'hello')
        self.assertEqual(c, b'hee')
        c = ba.translate(rosetta, b'l')
        self.assertEqual(ba, b'hello')
        self.assertEqual(c, b'hee')
        c = b.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        c = ba.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        self.assertRaises(TypeError, b.translate, None, None)
        self.assertRaises(TypeError, ba.translate, None, None)

    def test_split_bytearray(self):
        # split accepts any buffer-API separator.
        self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])

    def test_rsplit_bytearray(self):
        self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
    test.buffer_tests.MixinBytesBufferCommonTests):
    """PEP 3137 checks run via the shared buffer-tests mixin."""

    def marshal(self, x):
        # Hook used by the mixin to build the type under test.
        return bytearray(x)

    def test_returns_new_copy(self):
        val = self.marshal(b'1234')
        # On immutable types these MAY return a reference to themselves
        # but on mutable types like bytearray they MUST return a new copy.
        for methname in ('zfill', 'rjust', 'ljust', 'center'):
            method = getattr(val, methname)
            newval = method(3)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            methname+' returned self on a mutable object')
        # NOTE: each expression string is eval'd against the local name
        # 'val' above -- do not rename that variable.
        for expr in ('val.split()[0]', 'val.rsplit()[0]',
                     'val.partition(".")[0]', 'val.rpartition(".")[2]',
                     'val.splitlines()[0]', 'val.replace("", "")'):
            newval = eval(expr)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
    """Adapts the generic string test suite to a bytes-like type2test.

    String fixtures from the shared suite are converted to UTF-8 bytes,
    and tests that do not apply to bytes semantics are disabled with
    empty overrides.
    """

    def fixtype(self, obj):
        # str fixtures become UTF-8 bytes; everything else is delegated
        # to the base implementation.
        if not isinstance(obj, str):
            return super(FixedStringTest, self).fixtype(obj)
        return obj.encode("utf-8")

    # Currently the bytes containment testing uses a single integer
    # value. This may not be the final design, but until then the
    # bytes section with in a bytes containment not valid
    def test_contains(self):
        pass

    def test_expandtabs(self):
        pass

    def test_upper(self):
        pass

    def test_lower(self):
        pass

    def test_hash(self):
        # XXX check this out
        pass
class ByteArrayAsStringTest(FixedStringTest):
    """Runs the adapted string test suite against bytearray."""
    type2test = bytearray
class ByteArraySubclass(bytearray):
    # Trivial subclass used by ByteArraySubclassTest to check comparison,
    # concatenation, join, pickle and copy behavior of subclasses.
    pass
class ByteArraySubclassTest(unittest.TestCase):
    """Checks that bytearray subclasses interoperate with plain bytes and
    survive pickling/copying with instance attributes intact."""

    def test_basic(self):
        self.assertTrue(issubclass(ByteArraySubclass, bytearray))
        self.assertIsInstance(ByteArraySubclass(), bytearray)
        a, b = b"abcd", b"efgh"
        _a, _b = ByteArraySubclass(a), ByteArraySubclass(b)
        # test comparison operators with subclass instances
        self.assertTrue(_a == _a)
        self.assertTrue(_a != _b)
        self.assertTrue(_a < _b)
        self.assertTrue(_a <= _b)
        self.assertTrue(_b >= _a)
        self.assertTrue(_b > _a)
        self.assertTrue(_a is not a)
        # test concat of subclass instances
        self.assertEqual(a + b, _a + _b)
        self.assertEqual(a + b, a + _b)
        self.assertEqual(a + b, _a + b)
        # test repeat
        self.assertTrue(a*5 == _a*5)

    def test_join(self):
        # Make sure join returns a NEW object for single item sequences
        # involving a subclass.
        # Make sure that it is of the appropriate type.
        s1 = ByteArraySubclass(b"abcd")
        s2 = bytearray().join([s1])
        self.assertTrue(s1 is not s2)
        self.assertTrue(type(s2) is bytearray, type(s2))
        # Test reverse, calling join on subclass
        s3 = s1.join([b"abcd"])
        self.assertTrue(type(s3) is bytearray)

    def test_pickle(self):
        # Instance attributes (x, y) must round-trip with the data.
        a = ByteArraySubclass(b"abcd")
        a.x = 10
        a.y = ByteArraySubclass(b"efgh")
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            b = pickle.loads(pickle.dumps(a, proto))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))

    def test_copy(self):
        # Same round-trip expectations for copy.copy and copy.deepcopy.
        a = ByteArraySubclass(b"abcd")
        a.x = 10
        a.y = ByteArraySubclass(b"efgh")
        for copy_method in (copy.copy, copy.deepcopy):
            b = copy_method(a)
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))

    def test_init_override(self):
        # A subclass __init__ with a different signature must still be able
        # to delegate to bytearray.__init__.
        class subclass(bytearray):
            def __init__(self, newarg=1, *args, **kwargs):
                bytearray.__init__(self, *args, **kwargs)
        x = subclass(4, source=b"abcd")
        self.assertEqual(x, b"abcd")
        x = subclass(newarg=4, source=b"abcd")
        self.assertEqual(x, b"abcd")
def test_main():
    """Run the bytearray-focused test cases through the test.test_support driver."""
    # BytesTest, AssortedBytesTest and BytesAsStringTest are deliberately
    # excluded from this runner.
    suites = (
        ByteArrayTest,
        ByteArrayAsStringTest,
        ByteArraySubclassTest,
        BytearrayPEP3137Test,
    )
    test.test_support.run_unittest(*suites)

if __name__ == "__main__":
    test_main()
import responses
import pytest
from plugins.fishbans import fishbans, bancount

# Username used for every request in this module.
test_user = "notch"

# Canned Fishbans API payloads: multiple bans, a single ban, no bans,
# and a failed lookup ("success": false).
test_api = """
{"success":true,"stats":{"username":"notch","uuid":"069a79f444e94726a5befca90e38aaf5","totalbans":11,"service":{"mcbans":0,"mcbouncer":11,"mcblockit":0,"minebans":0,"glizer":0}}}
"""
test_api_single = """
{"success":true,"stats":{"username":"notch","uuid":"069a79f444e94726a5befca90e38aaf5","totalbans":1,"service":{"mcbans":0,"mcbouncer":1,"mcblockit":0,"minebans":0,"glizer":0}}}
"""
test_api_none = """
{"success":true,"stats":{"username":"notch","uuid":"069a79f444e94726a5befca90e38aaf5","totalbans":0,"service":{"mcbans":0,"mcbouncer":0,"mcblockit":0,"minebans":0,"glizer":0}}}
"""
test_api_failed = """
{"success":false}
"""

# Expected IRC replies (\x02 is the IRC bold control character).
bans_reply = "The user \x02notch\x02 has \x0211\x02 bans - http://fishbans.com/u/notch/"
count_reply = "Bans for \x02notch\x02: mcbouncer: \x0211\x02 - http://fishbans.com/u/notch/"
bans_reply_single = "The user \x02notch\x02 has \x021\x02 ban - http://fishbans.com/u/notch/"
bans_reply_none = "The user \x02notch\x02 has no bans - http://fishbans.com/u/notch/"
count_reply_none = "The user \x02notch\x02 has no bans - http://fishbans.com/u/notch/"
reply_failed = "Could not fetch ban data for notch."
reply_error = "Could not fetch ban data from the Fishbans API:"
class DummyBot():
    """Minimal stand-in for the bot object; only exposes a user_agent
    attribute (presumably read by the plugin's HTTP requests -- the tests
    pass the class itself, so only class attributes are needed)."""
    user_agent = "Ralybot/1.0"
class TestBans:
    """Tests for the ``fishbans`` command, with the HTTP layer mocked via
    the ``responses`` library."""

    @responses.activate
    def test_bans(self):
        """
        tests fishbans with a successful API response having multiple bans
        """
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', body=test_api)
        assert fishbans(test_user, DummyBot) == bans_reply

    @responses.activate
    def test_bans_single(self):
        """
        tests fishbans with a successful API response having a single ban
        """
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', body=test_api_single)
        assert fishbans(test_user, DummyBot) == bans_reply_single

    @responses.activate
    def test_bans_failed(self):
        """
        tests fishbans with a failed API response
        """
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', body=test_api_failed)
        assert fishbans(test_user, DummyBot) == reply_failed

    @responses.activate
    def test_bans_none(self):
        """
        tests fishbans with a successful API response having no bans
        """
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', body=test_api_none)
        assert fishbans(test_user, DummyBot) == bans_reply_none

    @responses.activate
    def test_bans_error(self):
        """
        tests fishbans with a HTTP error
        """
        # Only the error prefix is checked; the suffix carries the
        # exception text, which varies.
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', status=404)
        assert fishbans(test_user, DummyBot).startswith(reply_error)
class TestCount:
    """Tests for the ``bancount`` command, mirroring TestBans."""

    @responses.activate
    def test_count(self):
        """
        tests bancount with a successful API response having multiple bans
        """
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', body=test_api)
        assert bancount(test_user, DummyBot) == count_reply

    @responses.activate
    def test_count_failed(self):
        """
        tests bancount with a failed API response
        """
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', body=test_api_failed)
        assert bancount(test_user, DummyBot) == reply_failed

    @responses.activate
    def test_count_none(self):
        """
        tests bancount with a successful API response having no bans
        """
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', body=test_api_none)
        assert bancount(test_user, DummyBot) == count_reply_none

    @responses.activate
    def test_count_error(self):
        """
        tests bancount with a HTTP error
        """
        responses.add(responses.GET, 'http://api.fishbans.com/stats/notch/', status=404)
        assert bancount(test_user, DummyBot).startswith(reply_error)
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
List target types
"""
from rbuild import pluginapi
from rbuild.pluginapi import command
class ListTargetTypesCommand(command.ListCommand):
    """``rbuild list targettypes`` -- tabular listing of target types."""
    help = "List target types"
    # Attributes consumed by the ListCommand machinery: the resource name
    # to list, and which fields of each item to display.
    resource = "targettypes"
    listFields = ("name", "description")
class TargetTypes(pluginapi.Plugin):
    """rbuild plugin exposing the ``rbuild list targettypes`` subcommand."""
    name = 'targettypes'

    def initialize(self):
        """Register this plugin's subcommands under their parent commands."""
        # NOTE: the loop variable was previously named ``command``, which
        # shadowed the ``rbuild.pluginapi.command`` module imported at the
        # top of this file; renamed to avoid the shadowing hazard.
        for commandName, subcommand, commandClass in (
                ('list', 'targettypes', ListTargetTypesCommand),
                ):
            cmd = self.handle.Commands.getCommandClass(commandName)
            cmd.registerSubCommand(subcommand, commandClass)

    def list(self):
        """Return the target types known to the connected rBuilder."""
        return self.handle.facade.rbuilder.getTargetTypes()
#include <gtest/gtest.h>
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Tensor.h>
#include <caffe2/core/tensor.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/intrusive_ptr.h>
#include <string>
namespace {
// Typed test fixture: holds a default-constructed ExclusivelyOwned<T> and
// one wrapping a small sample tensor value.
template <typename T>
class ExclusivelyOwnedTest : public ::testing::Test {
 public:
  c10::ExclusivelyOwned<T> defaultConstructed;
  c10::ExclusivelyOwned<T> sample;
 protected:
  void SetUp() override; // defined below helpers
  void TearDown() override {
    // Reset both members; this also exercises move-assignment from a
    // default-constructed ExclusivelyOwned after every test.
    defaultConstructed = c10::ExclusivelyOwned<T>();
    sample = c10::ExclusivelyOwned<T>();
  }
};
// Produce the canonical sample value for each tested tensor type.
template <typename T>
T getSampleValue();

// A 2x2 float tensor of zeros on CPU.
template <>
at::Tensor getSampleValue() {
  return at::zeros({2, 2}).to(at::kCPU);
}
// caffe2 sample is just the ATen sample wrapped in a caffe2::Tensor.
template <>
caffe2::Tensor getSampleValue() {
  return caffe2::Tensor(getSampleValue<at::Tensor>());
}
// Assert that `eo` is (still) the sample value produced by getSampleValue.
template <typename T>
void assertIsSampleObject(const T& eo);

template <>
void assertIsSampleObject<at::Tensor>(const at::Tensor& t) {
  EXPECT_EQ(t.sizes(), (c10::IntArrayRef{2, 2}));
  EXPECT_EQ(t.strides(), (c10::IntArrayRef{2, 1}));
  // ASSERT here: reading data_ptr below as float is only valid if the
  // dtype check passed.
  ASSERT_EQ(t.scalar_type(), at::ScalarType::Float);
  static const float zeros[4] = {0};
  EXPECT_EQ(memcmp(zeros, t.data_ptr(), 4 * sizeof(float)), 0);
}
// Delegate to the ATen checker via the caffe2 -> ATen conversion.
template <>
void assertIsSampleObject<caffe2::Tensor>(const caffe2::Tensor& t) {
  assertIsSampleObject<at::Tensor>(at::Tensor(t));
}
// Defined out-of-line because it needs getSampleValue<T>, declared above.
template <typename T>
void ExclusivelyOwnedTest<T>::SetUp() {
  defaultConstructed = c10::ExclusivelyOwned<T>();
  sample = c10::ExclusivelyOwned<T>(getSampleValue<T>());
}
// Instantiate the fixture for both tensor front-ends.
using ExclusivelyOwnedTypes = ::testing::Types<
    at::Tensor,
    caffe2::Tensor
>;

TYPED_TEST_SUITE(ExclusivelyOwnedTest, ExclusivelyOwnedTypes);
TYPED_TEST(ExclusivelyOwnedTest, DefaultConstructor) {
  // Only checks that default construction (and destruction) runs cleanly.
  c10::ExclusivelyOwned<TypeParam> defaultConstructed;
}
TYPED_TEST(ExclusivelyOwnedTest, MoveConstructor) {
  // Move from both an empty and a populated ExclusivelyOwned; the moved-to
  // populated one must still hold the sample value.
  auto movedDefault = std::move(this->defaultConstructed);
  auto movedSample = std::move(this->sample);
  assertIsSampleObject(*movedSample);
}
TYPED_TEST(ExclusivelyOwnedTest, MoveAssignment) {
  // Move assignment from a default-constructed ExclusivelyOwned is handled in
  // TearDown at the end of every test!
  c10::ExclusivelyOwned<TypeParam> anotherSample = c10::ExclusivelyOwned<TypeParam>(getSampleValue<TypeParam>());
  anotherSample = std::move(this->sample);
  assertIsSampleObject(*anotherSample);
}
TYPED_TEST(ExclusivelyOwnedTest, MoveAssignmentFromContainedType) {
  // Assign a bare T (not an ExclusivelyOwned<T>) into an existing wrapper.
  c10::ExclusivelyOwned<TypeParam> anotherSample = c10::ExclusivelyOwned<TypeParam>(getSampleValue<TypeParam>());
  anotherSample = getSampleValue<TypeParam>();
  assertIsSampleObject(*anotherSample);
}
TYPED_TEST(ExclusivelyOwnedTest, Take) {
  // take() on an rvalue releases the wrapped value to the caller.
  auto x = std::move(this->sample).take();
  assertIsSampleObject(x);
}

} // namespace
// C-linkage entry point; presumably exists so it can be invoked/inspected
// easily from a debugger or external tooling -- confirm with maintainers.
extern "C" void inspectTensor() {
  auto t = getSampleValue<at::Tensor>();
}
// Same as inspectTensor, but with the sample wrapped in ExclusivelyOwned.
extern "C" void inspectExclusivelyOwnedTensor() {
  c10::ExclusivelyOwned<at::Tensor> t(getSampleValue<at::Tensor>());
}
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.utils import cint, cstr, flt
from frappe import _, msgprint, throw
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.controllers.stock_controller import update_gl_entries_after
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
# Template used to render the "entries" child table as an item grid in the
# desk form view.
form_grid_templates = {
    "entries": "templates/form_grid/item_grid.html"
}
class SalesInvoice(SellingController):
    """Sales Invoice controller: validation, submit/cancel workflow, stock
    ledger updates (for POS / update-stock invoices) and GL postings."""

    # Child-table doctype and fieldname holding the invoice line items.
    tname = 'Sales Invoice Item'
    fname = 'entries'

    def __init__(self, arg1, arg2=None):
        super(SalesInvoice, self).__init__(arg1, arg2)
        # Drives the "per_billed"/"billing_status" roll-up on linked Sales
        # Orders whenever this invoice's rows change.
        # NOTE(review): 'join_field' appears twice below with the same value;
        # the second occurrence wins, so it is harmless but redundant.
        self.status_updater = [{
            'source_dt': 'Sales Invoice Item',
            'target_field': 'billed_amt',
            'target_ref_field': 'amount',
            'target_dt': 'Sales Order Item',
            'join_field': 'so_detail',
            'target_parent_dt': 'Sales Order',
            'target_parent_field': 'per_billed',
            'source_field': 'amount',
            'join_field': 'so_detail',
            'percent_join_field': 'sales_order',
            'status_field': 'billing_status',
            'keyword': 'Billed',
            'overflow_type': 'billing'
        }]

    def validate(self):
        """Run all document-level validations before save."""
        super(SalesInvoice, self).validate()
        self.validate_posting_time()
        self.so_dn_required()
        self.validate_proj_cust()
        self.validate_with_previous_doc()
        self.validate_uom_is_integer("stock_uom", "qty")
        self.check_stop_sales_order("sales_order")
        self.validate_customer_account()
        self.validate_debit_acc()
        self.validate_fixed_asset_account()
        self.clear_unallocated_advances("Sales Invoice Advance", "advance_adjustment_details")
        self.validate_advance_jv("advance_adjustment_details", "sales_order")
        self.add_remarks()

        # POS-only checks.
        if cint(self.is_pos):
            self.validate_pos()
            self.validate_write_off_account()

        # Checks relevant only when this invoice also moves stock.
        if cint(self.update_stock):
            self.validate_item_code()
            self.update_current_stock()
            self.validate_delivery_note()

        if not self.is_opening:
            self.is_opening = 'No'

        self.set_aging_date()
        frappe.get_doc("Account", self.debit_to).validate_due_date(self.posting_date, self.due_date)
        self.set_against_income_account()
        self.validate_c_form()
        self.validate_time_logs_are_submitted()
        self.validate_multiple_billing("Delivery Note", "dn_detail", "amount",
            "delivery_note_details")

    def on_submit(self):
        """Post stock/GL entries and update linked documents on submit."""
        super(SalesInvoice, self).on_submit()

        if cint(self.update_stock) == 1:
            self.update_stock_ledger()
        else:
            # Check for Approving Authority
            if not self.recurring_id:
                frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
                    self.company, self.grand_total, self)

        self.check_prev_docstatus()

        self.update_status_updater_args()
        self.update_prevdoc_status()
        self.update_billing_status_for_zero_amount_refdoc("Sales Order")

        # this sequence because outstanding may get -ve
        self.make_gl_entries()
        self.check_credit_limit(self.debit_to)

        if not cint(self.is_pos) == 1:
            self.update_against_document_in_jv()

        self.update_time_log_batch(self.name)

    def before_cancel(self):
        # Unlink this invoice from any Time Log Batches before cancelling.
        self.update_time_log_batch(None)

    def on_cancel(self):
        """Reverse stock/GL entries and unlink related documents."""
        if cint(self.update_stock) == 1:
            self.update_stock_ledger()

        self.check_stop_sales_order("sales_order")

        from erpnext.accounts.utils import remove_against_link_from_jv
        remove_against_link_from_jv(self.doctype, self.name, "against_invoice")

        self.update_status_updater_args()
        self.update_prevdoc_status()
        self.update_billing_status_for_zero_amount_refdoc("Sales Order")
        self.validate_c_form_on_cancel()

        self.make_gl_entries_on_cancel()

    def update_status_updater_args(self):
        """When stock is updated from the invoice, also roll up delivered
        qty / per_delivered onto the linked Sales Order."""
        if cint(self.update_stock):
            self.status_updater.append({
                'source_dt': 'Sales Invoice Item',
                'target_dt': 'Sales Order Item',
                'target_parent_dt': 'Sales Order',
                'target_parent_field': 'per_delivered',
                'target_field': 'delivered_qty',
                'target_ref_field': 'qty',
                'source_field': 'qty',
                'join_field': 'so_detail',
                'percent_join_field': 'sales_order',
                'status_field': 'delivery_status',
                'keyword': 'Delivered',
                'second_source_dt': 'Delivery Note Item',
                'second_source_field': 'qty',
                'second_join_field': 'prevdoc_detail_docname',
                'overflow_type': 'delivery'
            })

    def get_portal_page(self):
        # Show the invoice on the website portal only once submitted.
        return "invoice" if self.docstatus == 1 else None

    def set_missing_values(self, for_validate=False):
        """Fill debit account, due date and POS defaults when absent."""
        self.set_pos_fields(for_validate)

        if not self.debit_to:
            self.debit_to = get_party_account(self.company, self.customer, "Customer")
        if not self.due_date:
            self.due_date = get_due_date(self.posting_date, self.customer, "Customer",
                self.debit_to, self.company)

        super(SalesInvoice, self).set_missing_values(for_validate)

    def update_time_log_batch(self, sales_invoice):
        """Point each row's Time Log Batch at `sales_invoice` (None unlinks)."""
        for d in self.get(self.fname):
            if d.time_log_batch:
                tlb = frappe.get_doc("Time Log Batch", d.time_log_batch)
                tlb.sales_invoice = sales_invoice
                tlb.ignore_validate_update_after_submit = True
                tlb.save()

    def validate_time_logs_are_submitted(self):
        for d in self.get(self.fname):
            if d.time_log_batch:
                status = frappe.db.get_value("Time Log Batch", d.time_log_batch, "status")
                if status != "Submitted":
                    frappe.throw(_("Time Log Batch {0} must be 'Submitted'").format(d.time_log_batch))

    def set_pos_fields(self, for_validate=False):
        """Set retail related fields from pos settings"""
        if cint(self.is_pos) != 1:
            return

        from erpnext.stock.get_item_details import get_pos_settings_item_details, get_pos_settings
        pos = get_pos_settings(self.company)

        if pos:
            if not for_validate and not self.customer:
                self.customer = pos.customer
                # self.set_customer_defaults()

            # Copy header-level defaults from the POS Setting, but during
            # validation only fill fields the user left blank.
            for fieldname in ('territory', 'naming_series', 'currency', 'taxes_and_charges', 'letter_head', 'tc_name',
                'selling_price_list', 'company', 'select_print_heading', 'cash_bank_account'):
                if (not for_validate) or (for_validate and not self.get(fieldname)):
                    self.set(fieldname, pos.get(fieldname))

            if not for_validate:
                self.update_stock = cint(pos.get("update_stock"))

            # set pos values in items
            for item in self.get("entries"):
                if item.get('item_code'):
                    for fname, val in get_pos_settings_item_details(pos,
                        frappe._dict(item.as_dict()), pos).items():
                        if (not for_validate) or (for_validate and not item.get(fname)):
                            item.set(fname, val)

            # fetch terms
            if self.tc_name and not self.terms:
                self.terms = frappe.db.get_value("Terms and Conditions", self.tc_name, "terms")

            # fetch charges
            if self.taxes_and_charges and not len(self.get("other_charges")):
                self.set_taxes("other_charges", "taxes_and_charges")

    def get_advances(self):
        # Pull unadjusted advance (credit) entries against the debit account.
        super(SalesInvoice, self).get_advances(self.debit_to,
            "Sales Invoice Advance", "advance_adjustment_details", "credit", "sales_order")

    def get_company_abbr(self):
        return frappe.db.sql("select abbr from tabCompany where name=%s", self.company)[0][0]

    def update_against_document_in_jv(self):
        """
        Links invoice and advance voucher:
            1. cancel advance voucher
            2. split into multiple rows if partially adjusted, assign against voucher
            3. submit advance voucher
        """
        lst = []
        for d in self.get('advance_adjustment_details'):
            if flt(d.allocated_amount) > 0:
                args = {
                    'voucher_no': d.journal_voucher,
                    'voucher_detail_no': d.jv_detail_no,
                    'against_voucher_type': 'Sales Invoice',
                    'against_voucher': self.name,
                    'account': self.debit_to,
                    'is_advance': 'Yes',
                    'dr_or_cr': 'credit',
                    'unadjusted_amt': flt(d.advance_amount),
                    'allocated_amt': flt(d.allocated_amount)
                }
                lst.append(args)

        if lst:
            from erpnext.accounts.utils import reconcile_against_document
            reconcile_against_document(lst)

    def validate_customer_account(self):
        """Validates Debit To Account and Customer Matches"""
        if self.customer and self.debit_to and not cint(self.is_pos):
            acc_head = frappe.db.sql("select master_name from `tabAccount` where name = %s and docstatus != 2", self.debit_to)

            # Either the account's master_name must equal the customer, or
            # (when no such account row exists) the account name must follow
            # the "<customer> - <company abbr>" naming convention.
            if (acc_head and cstr(acc_head[0][0]) != cstr(self.customer)) or \
                (not acc_head and (self.debit_to != cstr(self.customer) + " - " + self.get_company_abbr())):
                msgprint("Debit To: %s do not match with Customer: %s for Company: %s.\n If both correctly entered, please select Master Type \
and Master Name in account master." % (self.debit_to, self.customer, self.company), raise_exception=1)

    def validate_debit_acc(self):
        if frappe.db.get_value("Account", self.debit_to, "report_type") != "Balance Sheet":
            frappe.throw(_("Account must be a balance sheet account"))

    def validate_fixed_asset_account(self):
        """Validate Fixed Asset and whether Income Account Entered Exists"""
        for d in self.get('entries'):
            item = frappe.db.sql("""select name,is_asset_item,is_sales_item from `tabItem`
                where name = %s""", d.item_code)
            acc = frappe.db.sql("""select account_type from `tabAccount`
                where name = %s and docstatus != 2""", d.income_account)
            if item and item[0][1] == 'Yes' and acc and acc[0][0] != 'Fixed Asset':
                msgprint(_("Account {0} must be of type 'Fixed Asset' as Item {1} is an Asset Item").format(acc[0][0], d.item_code), raise_exception=True)

    def validate_with_previous_doc(self):
        """Cross-check header fields against the referenced Sales Orders /
        Delivery Notes, and optionally row rates too."""
        super(SalesInvoice, self).validate_with_previous_doc(self.tname, {
            "Sales Order": {
                "ref_dn_field": "sales_order",
                "compare_fields": [["customer", "="], ["company", "="], ["project_name", "="],
                    ["currency", "="]],
            },
            "Delivery Note": {
                "ref_dn_field": "delivery_note",
                "compare_fields": [["customer", "="], ["company", "="], ["project_name", "="],
                    ["currency", "="]],
            },
        })

        if cint(frappe.defaults.get_global_default('maintain_same_sales_rate')):
            super(SalesInvoice, self).validate_with_previous_doc(self.tname, {
                "Sales Order Item": {
                    "ref_dn_field": "so_detail",
                    "compare_fields": [["rate", "="]],
                    "is_child_table": True,
                    "allow_duplicate_prev_row_id": True
                },
                "Delivery Note Item": {
                    "ref_dn_field": "dn_detail",
                    "compare_fields": [["rate", "="]],
                    "is_child_table": True
                }
            })

    def set_aging_date(self):
        # For non-opening entries the aging date follows the posting date;
        # opening entries must carry one explicitly.
        if self.is_opening != 'Yes':
            self.aging_date = self.posting_date
        elif not self.aging_date:
            throw(_("Ageing Date is mandatory for opening entry"))

    def set_against_income_account(self):
        """Set against account for debit to account"""
        against_acc = []
        for d in self.get('entries'):
            if d.income_account not in against_acc:
                against_acc.append(d.income_account)
        self.against_income_account = ','.join(against_acc)

    def add_remarks(self):
        if not self.remarks: self.remarks = 'No Remarks'

    def so_dn_required(self):
        """check in manage account if sales order / delivery note required or not."""
        dic = {'Sales Order': 'so_required', 'Delivery Note': 'dn_required'}
        for i in dic:
            if frappe.db.get_value('Selling Settings', None, dic[i]) == 'Yes':
                for d in self.get('entries'):
                    # e.g. 'Sales Order' -> the row field 'sales_order'
                    if frappe.db.get_value('Item', d.item_code, 'is_stock_item') == 'Yes' \
                        and not d.get(i.lower().replace(' ', '_')):
                        msgprint(_("{0} is mandatory for Item {1}").format(i, d.item_code), raise_exception=1)

    def validate_proj_cust(self):
        """check for does customer belong to same project as entered.."""
        if self.project_name and self.customer:
            res = frappe.db.sql("""select name from `tabProject`
                where name = %s and (customer = %s or
                ifnull(customer,'')='')""", (self.project_name, self.customer))
            if not res:
                throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))

    def validate_pos(self):
        if not self.cash_bank_account and flt(self.paid_amount):
            frappe.throw(_("Cash or Bank Account is mandatory for making payment entry"))

        # Allow rounding slack of half a unit in the last displayed digit.
        if flt(self.paid_amount) + flt(self.write_off_amount) \
            - flt(self.grand_total) > 1/(10**(self.precision("grand_total") + 1)):
            frappe.throw(_("""Paid amount + Write Off Amount can not be greater than Grand Total"""))

    def validate_item_code(self):
        for d in self.get('entries'):
            if not d.item_code:
                msgprint(_("Item Code required at Row No {0}").format(d.idx), raise_exception=True)

    def validate_delivery_note(self):
        # An update-stock invoice cannot also reference a Delivery Note
        # (stock was already moved by the DN).
        for d in self.get("entries"):
            if d.delivery_note:
                msgprint(_("Stock cannot be updated against Delivery Note {0}").format(d.delivery_note), raise_exception=1)

    def validate_write_off_account(self):
        if flt(self.write_off_amount) and not self.write_off_account:
            msgprint(_("Please enter Write Off Account"), raise_exception=1)

    def validate_c_form(self):
        """ Blank C-form no if C-form applicable marked as 'No'"""
        if self.amended_from and self.c_form_applicable == 'No' and self.c_form_no:
            frappe.db.sql("""delete from `tabC-Form Invoice Detail` where invoice_no = %s
                and parent = %s""", (self.amended_from, self.c_form_no))
            frappe.db.set(self, 'c_form_no', '')

    def validate_c_form_on_cancel(self):
        """ Display message if C-Form no exists on cancellation of Sales Invoice"""
        if self.c_form_applicable == 'Yes' and self.c_form_no:
            msgprint(_("Please remove this Invoice {0} from C-Form {1}")
                .format(self.name, self.c_form_no), raise_exception=1)

    def update_current_stock(self):
        """Refresh actual/projected qty on item and packing rows from Bin."""
        for d in self.get('entries'):
            if d.item_code and d.warehouse:
                bin = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict=1)
                d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0

        for d in self.get('packing_details'):
            bin = frappe.db.sql("select actual_qty, projected_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict=1)
            d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
            d.projected_qty = bin and flt(bin[0]['projected_qty']) or 0

    def get_warehouse(self):
        """Resolve the POS warehouse: the current user's POS Setting wins,
        then the company-wide one; raise if no POS Setting exists at all."""
        user_pos_setting = frappe.db.sql("""select name, warehouse from `tabPOS Setting`
            where ifnull(user,'') = %s and company = %s""", (frappe.session['user'], self.company))
        warehouse = user_pos_setting[0][1] if user_pos_setting else None

        if not warehouse:
            global_pos_setting = frappe.db.sql("""select name, warehouse from `tabPOS Setting`
                where ifnull(user,'') = '' and company = %s""", self.company)

            if global_pos_setting:
                warehouse = global_pos_setting[0][1]
            elif not user_pos_setting:
                msgprint(_("POS Setting required to make POS Entry"), raise_exception=True)

        return warehouse

    def on_update(self):
        """Maintain packing list and (for POS) the paid amount on save."""
        if cint(self.update_stock) == 1:
            # Set default warehouse from pos setting
            if cint(self.is_pos) == 1:
                w = self.get_warehouse()
                if w:
                    for d in self.get('entries'):
                        if not d.warehouse:
                            d.warehouse = cstr(w)

            from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
            make_packing_list(self, 'entries')
        else:
            self.set('packing_details', [])

        if cint(self.is_pos) == 1:
            if flt(self.paid_amount) == 0:
                if self.cash_bank_account:
                    frappe.db.set(self, 'paid_amount',
                        (flt(self.grand_total) - flt(self.write_off_amount)))
                else:
                    # show message that the amount is not paid
                    frappe.db.set(self, 'paid_amount', 0)
                    frappe.msgprint(_("Note: Payment Entry will not be created since 'Cash or Bank Account' was not specified"))
        else:
            frappe.db.set(self, 'paid_amount', 0)

    def check_prev_docstatus(self):
        """Ensure every referenced Sales Order / Delivery Note is submitted."""
        for d in self.get('entries'):
            if d.sales_order:
                submitted = frappe.db.sql("""select name from `tabSales Order`
                    where docstatus = 1 and name = %s""", d.sales_order)
                if not submitted:
                    frappe.throw(_("Sales Order {0} is not submitted").format(d.sales_order))

            if d.delivery_note:
                submitted = frappe.db.sql("""select name from `tabDelivery Note`
                    where docstatus = 1 and name = %s""", d.delivery_note)
                if not submitted:
                    throw(_("Delivery Note {0} is not submitted").format(d.delivery_note))

    def update_stock_ledger(self):
        """Post negative (outgoing) stock ledger entries for stock items."""
        sl_entries = []
        for d in self.get_item_list():
            if frappe.db.get_value("Item", d.item_code, "is_stock_item") == "Yes" \
                and d.warehouse:
                sl_entries.append(self.get_sl_entries(d, {
                    "actual_qty": -1*flt(d.qty),
                    "stock_uom": frappe.db.get_value("Item", d.item_code, "stock_uom")
                }))

        self.make_sl_entries(sl_entries)

    def make_gl_entries(self, repost_future_gle=True):
        """Post GL entries and, when stock accounting is on, repost future
        GL entries affected by this invoice's stock movement."""
        gl_entries = self.get_gl_entries()

        if gl_entries:
            from erpnext.accounts.general_ledger import make_gl_entries

            # For fully written-off POS invoices, outstanding is updated
            # separately below rather than by the GL engine.
            update_outstanding = cint(self.is_pos) and self.write_off_account \
                and 'No' or 'Yes'

            make_gl_entries(gl_entries, cancel=(self.docstatus == 2),
                update_outstanding=update_outstanding, merge_entries=False)

            if update_outstanding == "No":
                from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
                update_outstanding_amt(self.debit_to, self.doctype, self.name)

            if repost_future_gle and cint(self.update_stock) \
                and cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
                items, warehouses = self.get_items_and_warehouses()
                update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items)

    def get_gl_entries(self, warehouse_account=None):
        """Assemble the full list of GL entries for this invoice."""
        from erpnext.accounts.general_ledger import merge_similar_entries

        gl_entries = []

        self.make_customer_gl_entry(gl_entries)

        self.make_tax_gl_entries(gl_entries)

        self.make_item_gl_entries(gl_entries)

        # merge gl entries before adding pos entries
        gl_entries = merge_similar_entries(gl_entries)

        self.make_pos_gl_entries(gl_entries)

        return gl_entries

    def make_customer_gl_entry(self, gl_entries):
        # Debit the customer (receivable) account for the grand total.
        if self.grand_total:
            gl_entries.append(
                self.get_gl_dict({
                    "account": self.debit_to,
                    "against": self.against_income_account,
                    "debit": self.grand_total,
                    "remarks": self.remarks,
                    "against_voucher": self.name,
                    "against_voucher_type": self.doctype,
                })
            )

    def make_tax_gl_entries(self, gl_entries):
        # Credit each tax head for its post-discount tax amount.
        for tax in self.get("other_charges"):
            if flt(tax.tax_amount_after_discount_amount):
                gl_entries.append(
                    self.get_gl_dict({
                        "account": tax.account_head,
                        "against": self.debit_to,
                        "credit": flt(tax.tax_amount_after_discount_amount),
                        "remarks": self.remarks,
                        "cost_center": tax.cost_center
                    })
                )

    def make_item_gl_entries(self, gl_entries):
        # income account gl entries
        for item in self.get("entries"):
            if flt(item.base_amount):
                gl_entries.append(
                    self.get_gl_dict({
                        "account": item.income_account,
                        "against": self.debit_to,
                        "credit": item.base_amount,
                        "remarks": self.remarks,
                        "cost_center": item.cost_center
                    })
                )

        # expense account gl entries
        if cint(frappe.defaults.get_global_default("auto_accounting_for_stock")) \
            and cint(self.update_stock):
            gl_entries += super(SalesInvoice, self).get_gl_entries()

    def make_pos_gl_entries(self, gl_entries):
        if cint(self.is_pos) and self.cash_bank_account and self.paid_amount:
            # POS, make payment entries
            gl_entries.append(
                self.get_gl_dict({
                    "account": self.debit_to,
                    "against": self.cash_bank_account,
                    "credit": self.paid_amount,
                    "remarks": self.remarks,
                    "against_voucher": self.name,
                    "against_voucher_type": self.doctype,
                })
            )
            gl_entries.append(
                self.get_gl_dict({
                    "account": self.cash_bank_account,
                    "against": self.debit_to,
                    "debit": self.paid_amount,
                    "remarks": self.remarks,
                })
            )
            # write off entries, applicable if only pos
            if self.write_off_account and self.write_off_amount:
                gl_entries.append(
                    self.get_gl_dict({
                        "account": self.debit_to,
                        "against": self.write_off_account,
                        "credit": self.write_off_amount,
                        "remarks": self.remarks,
                        "against_voucher": self.name,
                        "against_voucher_type": self.doctype,
                    })
                )
                gl_entries.append(
                    self.get_gl_dict({
                        "account": self.write_off_account,
                        "against": self.debit_to,
                        "debit": self.write_off_amount,
                        "remarks": self.remarks,
                        "cost_center": self.write_off_cost_center
                    })
                )
@frappe.whitelist()
def get_bank_cash_account(mode_of_payment):
    """Fetch the default account configured on a Mode of Payment.

    Shows a message (without raising) when no default account is set, and
    always returns a dict with the ``cash_bank_account`` key.
    """
    account = frappe.db.get_value("Mode of Payment", mode_of_payment, "default_account")
    if not account:
        frappe.msgprint(_("Please set default Cash or Bank account in Mode of Payment {0}").format(mode_of_payment))
    return {"cash_bank_account": account}
@frappe.whitelist()
def get_income_account(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query returning income accounts matching ``txt``.

    SECURITY: the original version interpolated the user-supplied ``txt``
    and ``filters['company']`` directly into the SQL string with ``%``
    formatting, allowing SQL injection. They are now passed as query
    parameters. ``searchfield`` (a column name chosen by the framework) and
    the match condition (framework-generated SQL) still have to be
    interpolated, since placeholders cannot stand in for identifiers.
    """
    from erpnext.controllers.queries import get_match_cond

    # income account can be any Credit account,
    # but can also be a Asset account with account_type='Income Account' in special circumstances.
    # Hence the first condition is an "OR"
    return frappe.db.sql("""select tabAccount.name from `tabAccount`
            where (tabAccount.report_type = "Profit and Loss"
                    or tabAccount.account_type = "Income Account")
                and tabAccount.group_or_ledger="Ledger"
                and tabAccount.docstatus!=2
                and ifnull(tabAccount.master_type, "")=""
                and ifnull(tabAccount.master_name, "")=""
                and tabAccount.company = %(company)s
                and tabAccount.{key} LIKE %(txt)s
                {mcond}""".format(key=searchfield, mcond=get_match_cond(doctype)),
        {"company": filters['company'], "txt": "%%%s%%" % txt})
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
    """Map a submitted Sales Invoice into a draft Delivery Note, carrying
    over only the quantities not yet delivered."""

    def set_missing_values(source, target):
        # Finalize the mapped document: skip pricing-rule re-application and
        # recompute defaults and totals on the new Delivery Note.
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")
        target.run_method("calculate_taxes_and_totals")

    def update_item(source_doc, target_doc, source_parent):
        # Row qty/amount = invoiced qty minus what was already delivered.
        target_doc.base_amount = (flt(source_doc.qty) - flt(source_doc.delivered_qty)) * \
            flt(source_doc.base_rate)
        target_doc.amount = (flt(source_doc.qty) - flt(source_doc.delivered_qty)) * \
            flt(source_doc.rate)
        target_doc.qty = flt(source_doc.qty) - flt(source_doc.delivered_qty)

    doclist = get_mapped_doc("Sales Invoice", source_name, {
        "Sales Invoice": {
            "doctype": "Delivery Note",
            # Only submitted invoices may be mapped.
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Invoice Item": {
            "doctype": "Delivery Note Item",
            "field_map": {
                "name": "prevdoc_detail_docname",
                "parent": "against_sales_invoice",
                "serial_no": "serial_no"
            },
            "postprocess": update_item
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "field_map": {
                "incentives": "incentives"
            },
            "add_if_empty": True
        }
    }, target_doc, set_missing_values)

    return doclist
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for BiT."""
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
convert_to_rgb,
get_resize_output_image_size,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class BitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a BiT image processor.
    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        crop_size (`dict[str, int]`, *optional*, defaults to 224):
            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: dict[str, int] | None = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: dict[str, int] | None = None,
        do_rescale: bool = True,
        rescale_factor: int | float = 1 / 255,
        do_normalize: bool = True,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
    def resize(
        self,
        image: np.ndarray,
        size: dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: str | ChannelDimension | None = None,
        input_data_format: str | ChannelDimension | None = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.
        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        default_to_square = True
        if "shortest_edge" in size:
            size = size["shortest_edge"]
            default_to_square = False
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
        output_size = get_resize_output_image_size(
            image,
            size=size,
            default_to_square=default_to_square,
            input_data_format=input_data_format,
        )
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool | None = None,
        size: dict[str, int] | None = None,
        resample: PILImageResampling | None = None,
        do_center_crop: bool | None = None,
        crop_size: int | None = None,
        do_rescale: bool | None = None,
        rescale_factor: float | None = None,
        do_normalize: bool | None = None,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        do_convert_rgb: bool | None = None,
        return_tensors: str | TensorType | None = None,
        data_format: ChannelDimension | None = ChannelDimension.FIRST,
        input_data_format: str | ChannelDimension | None = None,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images.
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        # Resolve every option against the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])
        all_images = []
        for image in images:
            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
            if do_center_crop:
                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )
            all_images.append(image)
        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            for image in all_images
        ]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
# Public API of this module.
__all__ = ["BitImageProcessor"]
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT add operator
# Long configs sweep the cross product of larger shapes on CPU and CUDA.
add_long_configs = op_bench.cross_product_configs(
    M=[8, 128], N=[32, 64], K=[256, 512], device=["cpu", "cuda"], tags=["long"]
)
# Short configs enumerate a few small explicit (M, N, K) shapes for quick runs.
add_short_configs = op_bench.config_list(
    attr_names=["M", "N", "K"],
    attrs=[
        [1, 1, 1],
        [64, 64, 64],
        [64, 64, 128],
    ],
    cross_product_configs={
        "device": ["cpu", "cuda"],
    },
    tags=["short"],
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark for the element-wise torch.add operator."""

    def init(self, M, N, K, device):
        # auto_set() is called once per operand: the framework uses the
        # successive calls to enumerate the backward-gradient variants.
        shape = (M, N, K)
        self.inputs = {
            "input_one": torch.rand(*shape, device=device, requires_grad=self.auto_set()),
            "input_two": torch.rand(*shape, device=device, requires_grad=self.auto_set()),
        }
        self.set_module_name("add")

    def forward(self, input_one, input_two):
        return torch.add(input_one, input_two)
# The generated test names based on add_short_configs will be in the following pattern:
# add_M8_N16_K32_devicecpu
# add_M8_N16_K32_devicecpu_bwdall
# add_M8_N16_K32_devicecpu_bwd1
# add_M8_N16_K32_devicecpu_bwd2
# ...
# Those names can be used to filter tests.
# Register both forward and gradient (backward) benchmark variants.
op_bench.generate_pt_test(add_long_configs + add_short_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddBenchmark)
"""Mircobenchmark for addr operator."""
class AddrBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device, dtype):
self.inputs = {
"input_one": torch.rand(
(M, N), device=device, requires_grad=self.auto_set(), dtype=dtype
),
"vec1": torch.rand(
(M,), device=device, requires_grad=self.auto_set(), dtype=dtype
),
"vec2": torch.rand(
(N,), device=device, requires_grad=self.auto_set(), dtype=dtype
),
}
self.set_module_name("addr")
def forward(self, input_one, vec1, vec2):
return torch.addr(input_one, vec1, vec2)
# addr is benchmarked across both a wide and a tall matrix, on CPU and CUDA,
# in double and half precision.
addr_configs = op_bench.cross_product_configs(
    M=[8, 256],
    N=[256, 16],
    device=["cpu", "cuda"],
    dtype=[torch.double, torch.half],
    tags=["addr"],
)
op_bench.generate_pt_test(addr_configs, AddrBenchmark)
op_bench.generate_pt_gradient_test(addr_configs, AddrBenchmark)
# Entry point: run the registered benchmarks when executed as a script.
if __name__ == "__main__":
    op_bench.benchmark_runner.main()
/*
* contrib/btree_gist/btree_utils_num.h
*/
#ifndef __BTREE_UTILS_NUM_H__
#define __BTREE_UTILS_NUM_H__
#include <math.h>
#include <float.h>
#include "access/gist.h"
#include "btree_gist.h"
/* A serialized index key is treated as an opaque byte string. */
typedef char GBT_NUMKEY;
/* Better readable key */
typedef struct
{
	const GBT_NUMKEY *lower,
			   *upper;
} GBT_NUMKEY_R;
/* for sorting */
typedef struct
{
	int i;          /* original position of the entry */
	GBT_NUMKEY *t;  /* pointer to the key being sorted */
} Nsrt;
/* type description */
typedef struct
{
	/* Attribs */
	enum gbtree_type t; /* data type */
	int32 size; /* size of type, 0 means variable */
	int32 indexsize; /* size of datums stored in index */
	/* Methods */
	bool (*f_gt) (const void *, const void *, FmgrInfo *); /* greater than */
	bool (*f_ge) (const void *, const void *, FmgrInfo *); /* greater or equal */
	bool (*f_eq) (const void *, const void *, FmgrInfo *); /* equal */
	bool (*f_le) (const void *, const void *, FmgrInfo *); /* less or equal */
	bool (*f_lt) (const void *, const void *, FmgrInfo *); /* less than */
	int (*f_cmp) (const void *, const void *, FmgrInfo *); /* key compare function */
	float8 (*f_dist) (const void *, const void *, FmgrInfo *); /* key distance function */
} gbtree_ninfo;
/*
* Numeric btree functions
*/
/*
* Note: The factor 0.49 in following macro avoids floating point overflows
*/
/*
 * NOTE: this macro expands PG_GETARG_POINTER(0), so it may only be used
 * inside a function declared with the PG_FUNCTION_ARGS calling convention.
 */
#define penalty_num(result,olower,oupper,nlower,nupper) do { \
  double	tmp = 0.0F; \
  (*(result))	= 0.0F; \
  if ( (nupper) > (oupper) ) \
      tmp += ( ((double)nupper)*0.49F - ((double)oupper)*0.49F ); \
  if (	(olower) > (nlower)  ) \
      tmp += ( ((double)olower)*0.49F - ((double)nlower)*0.49F ); \
  if (tmp > 0.0F) \
  { \
    (*(result)) += FLT_MIN; \
    (*(result)) += (float) ( ((double)(tmp)) / ( (double)(tmp) + ( ((double)(oupper))*0.49F - ((double)(olower))*0.49F ) ) ); \
    (*(result)) *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); \
  } \
} while (0)
/*
 * Convert an Interval to an approximate equivalent number of seconds
 * (as a double). Here because we need it for time/timetz as well as
 * interval. See interval_cmp_internal for comparison.
 */
#define INTERVAL_TO_SEC(ivp) \
	(((double) (ivp)->time) / ((double) USECS_PER_SEC) + \
	 (ivp)->day * (24.0 * SECS_PER_HOUR) + \
	 (ivp)->month * (30.0 * SECS_PER_DAY))
/* Absolute difference of two values of type t, computed in float8. */
#define GET_FLOAT_DISTANCE(t, arg1, arg2)	fabs( ((float8) *((const t *) (arg1))) - ((float8) *((const t *) (arg2))) )
extern Interval *abs_interval(Interval *a);
/*
 * Generic implementations of the GiST support methods for numeric key
 * types; each takes a gbtree_ninfo describing the concrete type.
 */
extern bool gbt_num_consistent(const GBT_NUMKEY_R *key, const void *query,
							   const StrategyNumber *strategy, bool is_leaf,
							   const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern float8 gbt_num_distance(const GBT_NUMKEY_R *key, const void *query,
							   bool is_leaf, const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern GIST_SPLITVEC *gbt_num_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v,
										const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern GISTENTRY *gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo);
extern GISTENTRY *gbt_num_fetch(GISTENTRY *entry, const gbtree_ninfo *tinfo);
extern void *gbt_num_union(GBT_NUMKEY *out, const GistEntryVector *entryvec,
						   const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern bool gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b,
						 const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern void gbt_num_bin_union(Datum *u, GBT_NUMKEY *e,
							  const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
#endif | c | github | https://github.com/postgres/postgres | contrib/btree_gist/btree_utils_num.h |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Maps log action names to their log_action converter instances.
convertor_registry = {}
# Sentinel: an optional field left at its default, to be omitted from output.
missing = object()
# Sentinel distinguishing "no default supplied" from a default of None.
no_default = object()
class log_action(object):
    """Decorator describing the typed fields of one structured-log action.

    Registers the decorated method under its function name in
    ``convertor_registry`` and wraps it so that the caller's positional and
    keyword arguments are validated/converted into a single data dict before
    the method body runs.
    """
    def __init__(self, *args):
        # args: DataType instances describing the caller-supplied fields.
        self.args = {}
        self.args_no_default = []
        self.args_with_default = []
        # These are the required fields in a log message that usually aren't
        # supplied by the caller, but can be in the case of log_raw
        self.default_args = [
            Unicode("action"),
            Int("time"),
            Unicode("thread"),
            Int("pid", default=None),
            Unicode("source"),
            Unicode("component")]
        for arg in args:
            if arg.default is no_default:
                self.args_no_default.append(arg.name)
            else:
                self.args_with_default.append(arg.name)
            if arg.name in self.args:
                raise ValueError("Repeated argument name %s" % arg.name)
            self.args[arg.name] = arg
        for extra in self.default_args:
            self.args[extra.name] = extra
    def __call__(self, f):
        convertor_registry[f.__name__] = self
        converter = self
        def inner(self, *args, **kwargs):
            # Convert raw arguments into a field dict, then call the original.
            data = converter.convert(*args, **kwargs)
            return f(self, data)
        if hasattr(f, '__doc__'):
            setattr(inner, '__doc__', f.__doc__)
        return inner
    def convert(self, *args, **kwargs):
        """Convert positional/keyword arguments into a validated field dict.

        Positionals first fill the required (no-default) fields, in
        declaration order, then spill over into defaulted fields; keyword
        arguments may pre-empt either.  Raises TypeError for arity errors,
        duplicate fields, or unrecognised field names.
        """
        data = {}
        values = {}
        values.update(kwargs)
        # Required fields not already satisfied by keyword arguments.
        positional_no_default = [item for item in self.args_no_default if item not in values]
        num_no_default = len(positional_no_default)
        if len(args) < num_no_default:
            raise TypeError("Too few arguments")
        if len(args) > num_no_default + len(self.args_with_default):
            raise TypeError("Too many arguments")
        for i, name in enumerate(positional_no_default):
            values[name] = args[i]
        # Leftover positionals map onto defaulted fields in declaration order.
        positional_with_default = [self.args_with_default[i]
                                   for i in range(len(args) - num_no_default)]
        for i, name in enumerate(positional_with_default):
            if name in values:
                raise TypeError("Argument %s specified twice" % name)
            values[name] = args[i + num_no_default]
        # Fill in missing arguments
        for name in self.args_with_default:
            if name not in values:
                values[name] = self.args[name].default
        for key, value in values.iteritems():
            if key in self.args:
                out_value = self.args[key](value)
                # ``missing`` marks an optional field at its default: omit it.
                if out_value is not missing:
                    data[key] = out_value
            else:
                raise TypeError("Unrecognised argument %s" % key)
        return data
    def convert_known(self, **kwargs):
        """Like convert(), but silently drops unrecognised keyword fields."""
        known_kwargs = {name: value for name, value in kwargs.iteritems()
                        if name in self.args}
        return self.convert(**known_kwargs)
class DataType(object):
    """Base class for log-field converters.

    Each instance describes one named field; calling the instance either
    returns the default (possibly ``missing`` for optional fields) or the
    value coerced by the subclass-provided ``convert`` method.
    """
    def __init__(self, name, default=no_default, optional=False):
        self.name = name
        self.default = default
        if default is no_default and optional is not False:
            raise ValueError("optional arguments require a default value")
        self.optional = optional
    def __call__(self, value):
        if value == self.default:
            if self.optional:
                # Optional field left at its default: signal "omit this field".
                return missing
            return self.default
        try:
            return self.convert(value)
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception so those propagate normally.
        except Exception:
            raise ValueError("Failed to convert value %s of type %s for field %s to type %s" %
                             (value, type(value).__name__, self.name, self.__class__.__name__))
class Unicode(DataType):
    """Field type that coerces any value to a unicode string (Python 2)."""
    def convert(self, data):
        if isinstance(data, unicode):
            return data
        if isinstance(data, str):
            # Byte strings are assumed UTF-8; undecodable bytes are replaced.
            return data.decode("utf8", "replace")
        return unicode(data)
class TestId(DataType):
    """Field type for test ids: a unicode string or a tuple of them."""
    def convert(self, data):
        if isinstance(data, unicode):
            return data
        elif isinstance(data, str):
            return data.decode("utf-8", "replace")
        elif isinstance(data, tuple):
            # This is really a bit of a hack; should really split out convertors from the
            # fields they operate on
            func = Unicode(None).convert
            return tuple(func(item) for item in data)
        else:
            raise ValueError
class Status(DataType):
    """Field type for top-level test statuses (case-insensitive input)."""
    allowed = ["PASS", "FAIL", "OK", "ERROR", "TIMEOUT", "CRASH", "ASSERT", "SKIP"]

    def convert(self, data):
        normalised = data.upper()
        if normalised in self.allowed:
            return normalised
        raise ValueError
class SubStatus(Status):
    # Subtest statuses: no OK/CRASH/SKIP at this level, but NOTRUN is valid.
    allowed = ["PASS", "FAIL", "ERROR", "TIMEOUT", "ASSERT", "NOTRUN"]
class Dict(DataType):
    """Field type that copies its value into a plain dict."""
    def convert(self, data):
        return dict(data)
class List(DataType):
    """Field type for a list whose items are converted by ``item_type``."""
    def __init__(self, name, item_type, default=no_default, optional=False):
        DataType.__init__(self, name, default, optional)
        # Instantiate the item converter once; its field name is unused.
        self.item_type = item_type(None)
    def convert(self, data):
        return [self.item_type.convert(item) for item in data]
class Int(DataType):
    """Field type that coerces its value to int."""
    def convert(self, data):
        return int(data)
class Any(DataType):
    """Field type that accepts any value unchanged."""
    def convert(self, data):
        return data
# -*- coding: utf-8 -*-
"""
test_utility_functions
~~~~~~~~~~~~~~~~~~~~~~
Tests for the various utility functions provided by hyper-h2.
"""
import pytest
import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
from h2.utilities import extract_method_header
# These tests require a non-list-returning range function.
# On Python 2, ``range`` returns a list; alias it to the lazy ``xrange``.
# On Python 3, ``xrange`` does not exist and ``range`` is already lazy.
try:
    range = xrange
except NameError:
    range = range
class TestGetNextAvailableStreamID(object):
    """
    Tests for the ``H2Connection.get_next_available_stream_id`` method.
    """
    example_request_headers = [
        (':authority', 'example.com'),
        (':path', '/'),
        (':scheme', 'https'),
        (':method', 'GET'),
    ]
    example_response_headers = [
        (':status', '200'),
        ('server', 'fake-serv/0.1.0')
    ]
    server_config = h2.config.H2Configuration(client_side=False)
    def test_returns_correct_sequence_for_clients(self, frame_factory):
        """
        For a client connection, the correct sequence of stream IDs is
        returned.
        """
        # Running the exhaustive version of this test (all 1 billion available
        # stream IDs) is too painful. For that reason, we validate that the
        # original sequence is right for the first few thousand, and then just
        # check that it terminates properly.
        #
        # Make sure that the streams get cleaned up: 8k streams floating
        # around would make this test memory-hard, and it's not supposed to be
        # a test of how much RAM your machine has.
        c = h2.connection.H2Connection()
        c.initiate_connection()
        # Client-initiated streams use odd IDs: 1, 3, 5, ...
        initial_sequence = range(1, 2**13, 2)
        for expected_stream_id in initial_sequence:
            stream_id = c.get_next_available_stream_id()
            assert stream_id == expected_stream_id
            # Open and immediately complete the stream so it gets cleaned up.
            c.send_headers(
                stream_id=stream_id,
                headers=self.example_request_headers,
                end_stream=True
            )
            f = frame_factory.build_headers_frame(
                headers=self.example_response_headers,
                stream_id=stream_id,
                flags=['END_STREAM'],
            )
            c.receive_data(f.serialize())
            c.clear_outbound_data_buffer()
        # Jump up to the last available stream ID. Don't clean up the stream
        # here because who cares about one stream.
        last_client_id = 2**31 - 1
        c.send_headers(
            stream_id=last_client_id,
            headers=self.example_request_headers,
            end_stream=True
        )
        with pytest.raises(h2.exceptions.NoAvailableStreamIDError):
            c.get_next_available_stream_id()
    def test_returns_correct_sequence_for_servers(self, frame_factory):
        """
        For a server connection, the correct sequence of stream IDs is
        returned.
        """
        # Running the exhaustive version of this test (all 1 billion available
        # stream IDs) is too painful. For that reason, we validate that the
        # original sequence is right for the first few thousand, and then just
        # check that it terminates properly.
        #
        # Make sure that the streams get cleaned up: 8k streams floating
        # around would make this test memory-hard, and it's not supposed to be
        # a test of how much RAM your machine has.
        c = h2.connection.H2Connection(config=self.server_config)
        c.initiate_connection()
        c.receive_data(frame_factory.preamble())
        f = frame_factory.build_headers_frame(
            headers=self.example_request_headers
        )
        c.receive_data(f.serialize())
        # Server-initiated (pushed) streams use even IDs: 2, 4, 6, ...
        initial_sequence = range(2, 2**13, 2)
        for expected_stream_id in initial_sequence:
            stream_id = c.get_next_available_stream_id()
            assert stream_id == expected_stream_id
            # Push and complete the stream so it gets cleaned up.
            c.push_stream(
                stream_id=1,
                promised_stream_id=stream_id,
                request_headers=self.example_request_headers
            )
            c.send_headers(
                stream_id=stream_id,
                headers=self.example_response_headers,
                end_stream=True
            )
            c.clear_outbound_data_buffer()
        # Jump up to the last available stream ID. Don't clean up the stream
        # here because who cares about one stream.
        last_server_id = 2**31 - 2
        c.push_stream(
            stream_id=1,
            promised_stream_id=last_server_id,
            request_headers=self.example_request_headers,
        )
        with pytest.raises(h2.exceptions.NoAvailableStreamIDError):
            c.get_next_available_stream_id()
    def test_does_not_increment_without_stream_send(self):
        """
        If a new stream isn't actually created, the next stream ID doesn't
        change.
        """
        c = h2.connection.H2Connection()
        c.initiate_connection()
        first_stream_id = c.get_next_available_stream_id()
        second_stream_id = c.get_next_available_stream_id()
        assert first_stream_id == second_stream_id
        c.send_headers(
            stream_id=first_stream_id,
            headers=self.example_request_headers
        )
        third_stream_id = c.get_next_available_stream_id()
        assert third_stream_id == (first_stream_id + 2)
class TestExtractHeader(object):
    """Tests for ``h2.utilities.extract_method_header`` on unicode and
    byte header lists."""
    example_request_headers = [
        (u':authority', u'example.com'),
        (u':path', u'/'),
        (u':scheme', u'https'),
        (u':method', u'GET'),
    ]
    example_headers_with_bytes = [
        (b':authority', b'example.com'),
        (b':path', b'/'),
        (b':scheme', b'https'),
        (b':method', b'GET'),
    ]
    @pytest.mark.parametrize(
        'headers', [example_request_headers, example_headers_with_bytes]
    )
    def test_extract_header_method(self, headers):
        # The method is always returned as bytes, whatever the input type.
        method = extract_method_header(headers)
        assert method == b'GET'
# Copyright (c) 2010 Philip Taylor
# Released under the BSD license and W3C Test Suite License: see LICENSE.txt
# Current code status:
#
# This was originally written for use at
# http://philip.html5.org/tests/canvas/suite/tests/
#
# It has been adapted for use with the Web Platform Test Suite suite at
# https://github.com/w3c/web-platform-tests/
#
# The W3C version excludes a number of features (multiple versions of each test
# case of varying verbosity, Mozilla mochitests, semi-automated test harness)
# to focus on simply providing reviewable test cases. It also expects a different
# directory structure.
# This code attempts to support both versions, but the non-W3C version hasn't
# been tested recently and is probably broken.
# To update or add test cases:
#
# * Modify the tests*.yaml files.
# 'name' is an arbitrary hierarchical name to help categorise tests.
# 'desc' is a rough description of what behaviour the test aims to test.
# 'testing' is a list of references to spec.yaml, to show which spec sentences
# this test case is primarily testing.
# 'code' is JavaScript code to execute, with some special commands starting with '@'
# 'expected' is what the final canvas output should be: a string 'green' or 'clear'
# (100x50 images in both cases), or a string 'size 100 50' (or any other size)
# followed by Python code using Pycairo to generate the image.
#
# * Run "python gentest.py".
# This requires a few Python modules which might not be ubiquitous.
# It has only been tested on Linux.
# It will usually emit some warnings, which ideally should be fixed but can
# generally be safely ignored.
#
# * Test the tests, add new ones to Git, remove deleted ones from Git, etc.
import re
import codecs
import time
import os
import shutil
import sys
import xml.dom.minidom
from xml.dom.minidom import Node
try:
import cairocffi as cairo
except ImportError:
import cairo
try:
import syck as yaml # compatible and lots faster
except ImportError:
import yaml
def genTestUtils(TESTOUTPUTDIR, IMAGEOUTPUTDIR, TEMPLATEFILE, NAME2DIRFILE, ISOFFSCREENCANVAS):
# Default mode is for the W3C test suite; the --standalone option
# generates various extra files that aren't needed there
W3CMODE = True
if '--standalone' in sys.argv:
W3CMODE = False
MISCOUTPUTDIR = './output'
SPECOUTPUTDIR = '../../annotated-spec'
SPECOUTPUTPATH = '../annotated-spec' # relative to TESTOUTPUTDIR
def simpleEscapeJS(str):
return str.replace('\\', '\\\\').replace('"', '\\"')
    def escapeJS(str):
        """Escape a string for embedding in JS source; ``[name]`` becomes a
        runtime concatenation so failure messages show the variable value."""
        str = simpleEscapeJS(str)
        str = re.sub(r'\[(\w+)\]', r'[\\""+(\1)+"\\"]', str) # kind of an ugly hack, for nicer failure-message output
        return str
def escapeHTML(str):
return str.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"')
    def expand_nonfinite(method, argstr, tail):
        """
        >>> print expand_nonfinite('f', '<0 a>, <0 b>', ';')
        f(a, 0);
        f(0, b);
        f(a, b);
        >>> print expand_nonfinite('f', '<0 a>, <0 b c>, <0 d>', ';')
        f(a, 0, 0);
        f(0, b, 0);
        f(0, c, 0);
        f(0, 0, d);
        f(a, b, 0);
        f(a, b, d);
        f(a, 0, d);
        f(0, b, d);
        """
        # argstr is "<valid-1 invalid1-1 invalid2-1 ...>, ..." (where usually
        # 'invalid' is Infinity/-Infinity/NaN)
        args = []
        for arg in argstr.split(', '):
            a = re.match('<(.*)>', arg).group(1)
            args.append(a.split(' '))
        calls = []
        # Start with the valid argument list
        call = [ args[j][0] for j in range(len(args)) ]
        # For each argument alone, try setting it to all its invalid values:
        for i in range(len(args)):
            for a in args[i][1:]:
                c2 = call[:]
                c2[i] = a
                calls.append(c2)
        # For all combinations of >= 2 arguments, try setting them to their
        # first invalid values. (Don't do all invalid values, because the
        # number of combinations explodes.)
        def f(c, start, depth):
            # Recursively substitute the first invalid value into every
            # combination of argument positions from ``start`` onwards;
            # depth > 0 means at least one substitution has been made.
            for i in range(start, len(args)):
                if len(args[i]) > 1:
                    a = args[i][1]
                    c2 = c[:]
                    c2[i] = a
                    if depth > 0: calls.append(c2)
                    f(c2, i+1, depth+1)
        f(call, 0, 0)
        return '\n'.join('%s(%s)%s' % (method, ', '.join(c), tail) for c in calls)
# Run with --test argument to run unit tests
if len(sys.argv) > 1 and sys.argv[1] == '--test':
import doctest
doctest.testmod()
sys.exit()
templates = yaml.load(open(TEMPLATEFILE, "r").read())
name_mapping = yaml.load(open(NAME2DIRFILE, "r").read())
SPECFILE = 'spec.yaml'
if ISOFFSCREENCANVAS:
SPECFILE = '../../2dcontext/tools/spec.yaml'
spec_assertions = []
for s in yaml.load(open(SPECFILE, "r").read())['assertions']:
if 'meta' in s:
eval(compile(s['meta'], '<meta spec assertion>', 'exec'), {}, {'assertions':spec_assertions})
else:
spec_assertions.append(s)
tests = []
TESTSFILES = ['tests.yaml', 'tests2d.yaml', 'tests2dtext.yaml']
if ISOFFSCREENCANVAS:
TESTSFILES = ['tests2d.yaml']
for t in sum([ yaml.load(open(f, "r").read()) for f in TESTSFILES], []):
if 'DISABLED' in t:
continue
if 'meta' in t:
eval(compile(t['meta'], '<meta test>', 'exec'), {}, {'tests':tests})
else:
tests.append(t)
category_names = []
category_contents_direct = {}
category_contents_all = {}
spec_ids = {}
for t in spec_assertions: spec_ids[t['id']] = True
spec_refs = {}
def backref_html(name):
backrefs = []
c = ''
for p in name.split('.')[:-1]:
c += '.'+p
backrefs.append('<a href="index%s.html">%s</a>.' % (c, p))
backrefs.append(name.split('.')[-1])
return ''.join(backrefs)
    def make_flat_image(filename, w, h, r, g, b, a):
        # Render (once) a w x h PNG filled with a single RGBA colour into
        # IMAGEOUTPUTDIR and return the filename; reused if already on disk.
        if os.path.exists('%s/%s' % (IMAGEOUTPUTDIR, filename)):
            return filename
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
        cr = cairo.Context(surface)
        cr.set_source_rgba(r, g, b, a)
        cr.rectangle(0, 0, w, h)
        cr.fill()
        surface.write_to_png('%s/%s' % (IMAGEOUTPUTDIR, filename))
        return filename
# Ensure the test output directories exist
testdirs = [TESTOUTPUTDIR, IMAGEOUTPUTDIR, MISCOUTPUTDIR]
if not W3CMODE: testdirs.append('%s/mochitests' % MISCOUTPUTDIR)
else:
for map_dir in set(name_mapping.values()):
testdirs.append("%s/%s" % (TESTOUTPUTDIR, map_dir))
for d in testdirs:
try: os.mkdir(d)
except: pass # ignore if it already exists
mochitests = []
used_images = {}
def expand_test_code(code):
    """Expand the '@'-directives in a test's code into runnable testharness JS.

    Substitution order is load-bearing: @nonfinite must run before the
    '@assert throws' rules, and all specific '@assert pixel ...' /
    '@assert throws ...' forms before the generic '@assert ...' fallback.
    Fails (assert) if any '@' directive is left unexpanded.
    """
    code = re.sub(r'@nonfinite ([^(]+)\(([^)]+)\)(.*)', lambda m: expand_nonfinite(m.group(1), m.group(2), m.group(3)), code) # must come before '@assert throws'

    # Exact pixel match.  OffscreenCanvas tests assert against the
    # 'offscreenCanvas' variable instead of 'canvas'.
    if ISOFFSCREENCANVAS:
        code = re.sub(r'@assert pixel (\d+,\d+) == (\d+,\d+,\d+,\d+);',
                      r'_assertPixel(offscreenCanvas, \1, \2, "\1", "\2");',
                      code)
    else:
        code = re.sub(r'@assert pixel (\d+,\d+) == (\d+,\d+,\d+,\d+);',
                      r'_assertPixel(canvas, \1, \2, "\1", "\2");',
                      code)

    # Approximate pixel match with the default tolerance of 2.
    if ISOFFSCREENCANVAS:
        code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+);',
                      r'_assertPixelApprox(offscreenCanvas, \1, \2, "\1", "\2", 2);',
                      code)
    else:
        code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+);',
                      r'_assertPixelApprox(canvas, \1, \2, "\1", "\2", 2);',
                      code)

    # Approximate pixel match with an explicit '+/- N' tolerance.
    if ISOFFSCREENCANVAS:
        code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+) \+/- (\d+);',
                      r'_assertPixelApprox(offscreenCanvas, \1, \2, "\1", "\2", \3);',
                      code)
    else:
        code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+) \+/- (\d+);',
                      r'_assertPixelApprox(canvas, \1, \2, "\1", "\2", \3);',
                      code)

    # DOMException codes (FOO_ERR) vs. JS error constructors (FooError).
    code = re.sub(r'@assert throws (\S+_ERR) (.*);',
                  r'assert_throws("\1", function() { \2; });',
                  code)

    code = re.sub(r'@assert throws (\S+Error) (.*);',
                  r'assert_throws(new \1(), function() { \2; });',
                  code)

    # Identity / difference / regexp asserts; the raw expression text is
    # JS-escaped and embedded in the failure message.
    code = re.sub(r'@assert (.*) === (.*);',
                  lambda m: '_assertSame(%s, %s, "%s", "%s");'
                            % (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2)))
                  , code)

    code = re.sub(r'@assert (.*) !== (.*);',
                  lambda m: '_assertDifferent(%s, %s, "%s", "%s");'
                            % (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2)))
                  , code)

    code = re.sub(r'@assert (.*) =~ (.*);',
                  lambda m: 'assert_regexp_match(%s, %s);'
                            % (m.group(1), m.group(2))
                  , code)

    # Generic truthiness assert - must come last among the @assert rules.
    code = re.sub(r'@assert (.*);',
                  lambda m: '_assert(%s, "%s");'
                            % (m.group(1), escapeJS(m.group(1)))
                  , code)

    # Mozilla-only annotations are stripped for the w3c output.
    code = re.sub(r' @moz-todo', '', code)

    code = re.sub(r'@moz-UniversalBrowserRead;',
                  ""
                  , code)

    assert('@' not in code)

    return code
def expand_mochitest_code(code):
    """Expand the '@'-directives into Mozilla mochitest (SimpleTest) JS.

    Mirrors expand_test_code() but targets the mochitest helpers
    (isPixel/ok/is/todo) and keeps the @moz-* annotations meaningful:
    '@moz-todo' turns ok()/is() asserts into todo()/todo_is() variants.
    Substitution order matters, as in expand_test_code().
    """
    code = re.sub(r'@nonfinite ([^(]+)\(([^)]+)\)(.*)', lambda m: expand_nonfinite(m.group(1), m.group(2), m.group(3)), code)

    # Pixel asserts: exact (tolerance 0), approximate (2), explicit tolerance.
    code = re.sub(r'@assert pixel (\d+,\d+) == (\d+,\d+,\d+,\d+);',
                  r'isPixel(ctx, \1, \2, "\1", "\2", 0);',
                  code)

    code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+);',
                  r'isPixel(ctx, \1, \2, "\1", "\2", 2);',
                  code)

    code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+) \+/- (\d+);',
                  r'isPixel(ctx, \1, \2, "\1", "\2", \3);',
                  code)

    # Exception asserts expand to inline try/catch blocks checking either a
    # DOMException code, an Error constructor, or just that something threw.
    code = re.sub(r'@assert throws (\S+_ERR) (.*);',
                  lambda m: 'var _thrown = undefined; try {\n  %s;\n} catch (e) { _thrown = e }; ok(_thrown && _thrown.code == DOMException.%s, "should throw %s");'
                            % (m.group(2), m.group(1), m.group(1))
                  , code)

    code = re.sub(r'@assert throws (\S+Error) (.*);',
                  lambda m: 'var _thrown = undefined; try {\n  %s;\n} catch (e) { _thrown = e }; ok(_thrown && (_thrown instanceof %s), "should throw %s");'
                            % (m.group(2), m.group(1), m.group(1))
                  , code)

    code = re.sub(r'@assert throws (.*);',
                  lambda m: 'try { var _thrown = false;\n  %s;\n} catch (e) { _thrown = true; } finally { ok(_thrown, "should throw exception"); }'
                            % (m.group(1))
                  , code)

    code = re.sub(r'@assert (.*) =~ (.*);',
                  lambda m: 'ok(%s.match(%s), "%s.match(%s)");'
                            % (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2)))
                  , code)

    # Generic assert - last of the @assert rules.
    code = re.sub(r'@assert (.*);',
                  lambda m: 'ok(%s, "%s");'
                            % (m.group(1), escapeJS(m.group(1)))
                  , code)

    # '@moz-todo' downgrades the preceding ok()/is() to todo()/todo_is().
    code = re.sub(r'((?:^|\n|;)\s*)ok(.*;) @moz-todo',
                  lambda m: '%stodo%s'
                            % (m.group(1), m.group(2))
                  , code)

    code = re.sub(r'((?:^|\n|;)\s*)(is.*;) @moz-todo',
                  lambda m: '%stodo_%s'
                            % (m.group(1), m.group(2))
                  , code)

    code = re.sub(r'@moz-UniversalBrowserRead;',
                  "netscape.security.PrivilegeManager.enablePrivilege('UniversalBrowserRead');"
                  , code)

    # Mochitest images are copied in next to the test as image_<name>.
    code = code.replace('../images/', 'image_')

    assert '@' not in code, '@ not in code:\n%s' % code

    return code
# Main generation loop: for every test definition, expand its code, render
# any expected-output image, and write the per-test HTML (and mochitest /
# worker variants) from the templates.
used_tests = {}
for i in range(len(tests)):
    test = tests[i]

    name = test['name']
    print "\r(%s)" % name, " "*32, "\t",

    # Duplicate test names would silently overwrite output files - warn.
    if name in used_tests:
        print "Test %s is defined twice" % name
    used_tests[name] = 1

    # Map the dotted test name to its target directory (longest prefix wins).
    mapped_name = None
    for mn in sorted(name_mapping.keys(), key=len, reverse=True):
        if name.startswith(mn):
            mapped_name = "%s/%s" % (name_mapping[mn], name)
            break
    if not mapped_name:
        print "LIKELY ERROR: %s has no defined target directory mapping" % name
        if ISOFFSCREENCANVAS:
            continue
        else:
            mapped_name = name
    if 'manual' in test:
        mapped_name += "-manual"

    # Register the test under every ancestor category ('.', '.2d.', ...).
    cat_total = ''
    for cat_part in [''] + name.split('.')[:-1]:
        cat_total += cat_part+'.'
        if not cat_total in category_names: category_names.append(cat_total)
        category_contents_all.setdefault(cat_total, []).append(name)
    category_contents_direct.setdefault(cat_total, []).append(name)

    # Record which spec assertions this test covers.
    for ref in test.get('testing', []):
        if ref not in spec_ids:
            print "Test %s uses nonexistent spec point %s" % (name, ref)
        spec_refs.setdefault(ref, []).append(name)
    #if not (len(test.get('testing', [])) or 'mozilla' in test):
    if not test.get('testing', []):
        print "Test %s doesn't refer to any spec points" % name

    # A 'green' expectation plus a transparent-black pixel assert is almost
    # certainly a mistake in the test definition.
    if test.get('expected', '') == 'green' and re.search(r'@assert pixel .* 0,0,0,0;', test['code']):
        print "Probable incorrect pixel test in %s" % name

    code = expand_test_code(test['code'])

    # Mochitests are only generated in Mozilla mode, and only for automated,
    # non-disabled tests.
    mochitest = not (W3CMODE or 'manual' in test or 'disabled' in test.get('mozilla', {}))
    if mochitest:
        mochi_code = expand_mochitest_code(test['code'])

        mochi_name = name
        if 'mozilla' in test:
            if 'throws' in test['mozilla']:
                mochi_code = templates['mochitest.exception'] % mochi_code
            if 'bug' in test['mozilla']:
                mochi_name = "%s - bug %s" % (name, test['mozilla']['bug'])

        if 'desc' in test:
            mochi_desc = '<!-- Testing: %s -->\n' % test['desc']
        else:
            mochi_desc = ''

        # Async tests (deferTest) call finish() themselves; sync tests get it
        # appended in the footer.
        if 'deferTest' in mochi_code:
            mochi_setup = ''
            mochi_footer = ''
        else:
            mochi_setup = ''
            mochi_footer = 'SimpleTest.finish();\n'

        # Pull in only the helper snippets the test actually uses.
        for f in ['isPixel', 'todo_isPixel', 'deferTest', 'wrapFunction']:
            if f in mochi_code:
                mochi_setup += templates['mochitest.%s' % f]
    else:
        if not W3CMODE:
            print "Skipping mochitest for %s" % name
        mochi_name = ''
        mochi_desc = ''
        mochi_code = ''
        mochi_setup = ''
        mochi_footer = ''

    # Build the "Expected output" section: either a shared flat-colour image
    # or a per-test PNG rendered by exec-ing the cairo snippet in 'expected'.
    expectation_html = ''
    if 'expected' in test and test['expected'] is not None:
        expected = test['expected']
        expected_img = None
        if expected == 'green':
            expected_img = make_flat_image('green-100x50.png', 100, 50, 0,1,0,1)
            if W3CMODE: expected_img = "/images/" + expected_img
        elif expected == 'clear':
            expected_img = make_flat_image('clear-100x50.png', 100, 50, 0,0,0,0)
            if W3CMODE: expected_img = "/images/" + expected_img
        else:
            if ';' in expected: print "Found semicolon in %s" % name
            expected = re.sub(r'^size (\d+) (\d+)',
                r'surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, \1, \2)\ncr = cairo.Context(surface)',
                expected)

            if mapped_name.endswith("-manual"):
                png_name = mapped_name[:-len("-manual")]
            else:
                png_name = mapped_name
            expected += "\nsurface.write_to_png('%s/%s.png')\n" % (IMAGEOUTPUTDIR, png_name)
            # Trusted in-repo code: the snippet only sees the 'cairo' module.
            eval(compile(expected, '<test %s>' % test['name'], 'exec'), {}, {'cairo':cairo})
            expected_img = "%s.png" % name

        if expected_img:
            expectation_html = ('<p class="output expectedtext">Expected output:' +
                '<p><img src="%s" class="output expected" id="expected" alt="">' % (expected_img))

    canvas = test.get('canvas', 'width="100" height="50"')

    # Prev/next navigation links wrap around to the index page.
    prev = tests[i-1]['name'] if i != 0 else 'index'
    next = tests[i+1]['name'] if i != len(tests)-1 else 'index'

    # Insert a U+200B after each dot so long names can line-wrap.
    name_wrapped = name.replace('.', '.​') # (see https://bugzilla.mozilla.org/show_bug.cgi?id=376188)

    refs = ''.join('<li><a href="%s/canvas.html#testrefs.%s">%s</a>\n' % (SPECOUTPUTPATH, n,n) for n in test.get('testing', []))
    if not W3CMODE and 'mozilla' in test and 'bug' in test['mozilla']:
        refs += '<li><a href="https://bugzilla.mozilla.org/show_bug.cgi?id=%d">Bugzilla</a>' % test['mozilla']['bug']

    notes = '<p class="notes">%s' % test['notes'] if 'notes' in test else ''

    scripts = ''
    for s in test.get('scripts', []):
        scripts += '<script src="%s"></script>\n' % (s)

    # 'script-variants' fans one test definition out into several output
    # files, each pulling in a different extra script.
    variants = test.get('script-variants', {})
    script_variants = [(v, '<script src="%s"></script>\n' % (s)) for (v, s) in variants.iteritems()]
    if not script_variants:
        script_variants = [('', '')]

    # Bare image names are repo-local resources: record them for copying and
    # point at ../images/; names containing '/' are used as-is.
    images = ''
    for i in test.get('images', []):
        id = i.split('/')[-1]
        if '/' not in i:
            used_images[i] = 1
            i = '../images/%s' % i
        images += '<img src="%s" id="%s" class="resource">\n' % (i,id)
    mochi_images = images.replace('../images/', 'image_')

    if W3CMODE: images = images.replace("../images/", "/images/")

    fonts = ''
    fonthack = ''
    for i in test.get('fonts', []):
        fonts += '@font-face {\n font-family: %s;\n src: url("/fonts/%s.ttf");\n}\n' % (i, i)
        # Browsers require the font to actually be used in the page
        if test.get('fonthack', 1):
            fonthack += '<span style="font-family: %s; position: absolute; visibility: hidden">A</span>\n' % i
    if fonts:
        fonts = '<style>\n%s</style>\n' % fonts

    fallback = test.get('fallback', '<p class="fallback">FAIL (fallback content)</p>')

    desc = test.get('desc', '')
    escaped_desc = simpleEscapeJS(desc)

    # Emit one output file per script variant.
    for (variant, extra_script) in script_variants:
        name_variant = '' if not variant else '.' + variant

        template_params = {
            'name':name + name_variant,
            'name_wrapped':name_wrapped, 'backrefs':backref_html(name),
            'mapped_name':mapped_name,
            'desc':desc, 'escaped_desc':escaped_desc,
            'prev':prev, 'next':next, 'refs':refs, 'notes':notes, 'images':images,
            'fonts':fonts, 'fonthack':fonthack,
            'canvas':canvas, 'expected':expectation_html, 'code':code,
            'scripts':scripts + extra_script,
            'mochi_name':mochi_name, 'mochi_desc':mochi_desc, 'mochi_code':mochi_code,
            'mochi_setup':mochi_setup, 'mochi_footer':mochi_footer, 'mochi_images':mochi_images,
            'fallback':fallback
        }

        if W3CMODE:
            f = codecs.open('%s/%s%s.html' % (TESTOUTPUTDIR, mapped_name, name_variant), 'w', 'utf-8')
            f.write(templates['w3c'] % template_params)
            if ISOFFSCREENCANVAS:
                f = codecs.open('%s/%s%s.worker.js' % (TESTOUTPUTDIR, mapped_name, name_variant), 'w', 'utf-8')
                f.write(templates['w3cworker'] % template_params)
        else:
            f = codecs.open('%s/%s%s.html' % (TESTOUTPUTDIR, name, name_variant), 'w', 'utf-8')
            f.write(templates['standalone'] % template_params)

            f = codecs.open('%s/framed.%s%s.html' % (TESTOUTPUTDIR, name, name_variant), 'w', 'utf-8')
            f.write(templates['framed'] % template_params)

            f = codecs.open('%s/minimal.%s%s.html' % (TESTOUTPUTDIR, name, name_variant), 'w', 'utf-8')
            f.write(templates['minimal'] % template_params)

        if mochitest:
            mochitests.append(name)
            f = codecs.open('%s/mochitests/test_%s%s.html' % (MISCOUTPUTDIR, name, name_variant), 'w', 'utf-8')
            f.write(templates['mochitest'] % template_params)
def write_mochitest_makefile():
    """Write the mochitests' Makefile.in, listing every generated test and
    image in 100-file chunks to stay under command-line length limits."""
    f = open('%s/mochitests/Makefile.in' % MISCOUTPUTDIR, 'w')
    f.write(templates['mochitest.Makefile'])
    files = ['test_%s.html' % n for n in mochitests] + ['image_%s' % n for n in used_images]
    chunksize = 100
    chunks = []
    for i in range(0, len(files), chunksize):
        chunk = files[i:i+chunksize]
        # Python 2 '/' is integer division here, yielding the chunk index.
        name = '_TEST_FILES_%d' % (i / chunksize)
        chunks.append(name)
        f.write('%s = \\\n' % name)
        for file in chunk: f.write('\t%s \\\n' % file)
        f.write('\t$(NULL)\n\n')
    f.write('# split up into groups to work around command-line length limits\n')
    for name in chunks:
        f.write('libs:: $(%s)\n\t$(INSTALL) $(foreach f,$^,"$f") $(DEPTH)/_tests/testing/mochitest/tests/$(relativesrcdir)\n\n' % name)
# Mozilla mode: copy each referenced image next to the mochitests as
# image_<name> (matching the rewrite done in expand_mochitest_code), then
# emit the Makefile.  The trailing bare print terminates the "\r(...)"
# progress line from the main loop.
if not W3CMODE:
    for i in used_images:
        shutil.copyfile("../../images/%s" % i, "%s/mochitests/image_%s" % (MISCOUTPUTDIR, i))
    write_mochitest_makefile()

print
def write_index():
    """Write the top-level index.html: a nested <ul> of categories and
    their direct child tests, walking category_names in order and tracking
    the current nesting depth by counting dots in the category prefix."""
    f = open('%s/index.html' % TESTOUTPUTDIR, 'w')
    f.write(templates['index.w3c' if W3CMODE else 'index'] % { 'updated':time.strftime('%Y-%m-%d', time.gmtime()) })
    f.write('\n<ul class="testlist">\n')
    depth = 1
    for category in category_names:
        name = category[1:-1] or ''
        count = len(category_contents_all[category])
        new_depth = category.count('.')
        # Close lists when moving back up, open them when going deeper.
        while new_depth < depth: f.write(' '*(depth-1) + '</ul>\n'); depth -= 1
        f.write(' '*depth + templates['index.w3c.category.item' if W3CMODE else 'index.category.item'] % (name or 'all', name, count, '' if count==1 else 's'))
        while new_depth+1 > depth: f.write(' '*depth + '<ul>\n'); depth += 1
        for item in category_contents_direct.get(category, []):
            f.write(' '*depth + '<li><a href="%s.html">%s</a>\n' % (item, item) )
    # Close any lists still open at the end.
    while 0 < depth: f.write(' '*(depth-1) + '</ul>\n'); depth -= 1
def write_category_indexes():
    """Write one index.<category>.html per category, listing every test
    (direct and nested) that belongs to it."""
    for category in category_names:
        name = (category[1:-1] or 'all')
        f = open('%s/index.%s.html' % (TESTOUTPUTDIR, name), 'w')
        f.write(templates['index.w3c.frame' if W3CMODE else 'index.frame'] % { 'backrefs':backref_html(name), 'category':name })
        for item in category_contents_all[category]:
            f.write(templates['index.w3c.frame.item' if W3CMODE else 'index.frame.item'] % item)
def write_reportgen():
    """Write reportgen.html with the full ('.' category) test list embedded
    as a JS array of quoted names."""
    f = open('%s/reportgen.html' % MISCOUTPUTDIR, 'w')
    items_text = ',\n'.join(('"%s"' % item) for item in category_contents_all['.'])
    f.write(templates['reportgen'] % {'items':items_text })
def write_results():
    """Write results.html: a per-test, per-UA results table built from
    results.yaml, ending with a pass-percentage row per UA."""
    results = {}
    uas = []          # UA titles, in first-seen order (table column order)
    uastrings = {}    # UA title -> full UA string (used as column tooltip)
    for item in category_contents_all['.']: results[item] = {}
    f = open('%s/results.html' % MISCOUTPUTDIR, 'w')
    f.write(templates['results'])
    if not os.path.exists('results.yaml'):
        print "Can't find results.yaml"
    else:
        for resultset in yaml.load(open('results.yaml', "r").read()):
            #title = "%s (%s)" % (resultset['ua'], resultset['time'])
            title = resultset['name']
            #assert title not in uas # don't allow repetitions
            # Repeated result sets are allowed but must agree on the UA string.
            if title not in uas:
                uas.append(title)
                uastrings[title] = resultset['ua']
            else:
                assert uastrings[title] == resultset['ua']
            for r in resultset['results']:
                if r['id'] not in results:
                    print 'Skipping results for removed test %s' % r['id']
                    continue
                # Notes arrive %XX / %uXXXX-escaped; decode both forms, then
                # store as UTF-8 bytes alongside the lowercased status.
                results[r['id']][title] = (
                    r['status'].lower(),
                    re.sub(r'%(..)', lambda m: chr(int(m.group(1), 16)),
                           re.sub(r'%u(....)', lambda m: unichr(int(m.group(1), 16)),
                                  r['notes'])).encode('utf8')
                )
    passes = {}
    for ua in uas:
        f.write('<th title="%s">%s\n' % (uastrings[ua], ua))
        passes[ua] = 0
    for id in category_contents_all['.']:
        f.write('<tr><td><a href="#%s" id="%s">#</a> <a href="%s.html">%s</a>\n' % (id, id, id, id))
        for ua in uas:
            status, details = results[id].get(ua, ('', ''))
            f.write('<td class="r %s"><ul class="d">%s</ul>\n' % (status, details))
            if status == 'pass': passes[ua] += 1
    f.write('<tr><th>Passes\n')
    for ua in uas:
        f.write('<td>%.1f%%\n' % ((100.0 * passes[ua]) / len(category_contents_all['.'])))
    f.write('<tr><td>\n')
    for ua in uas:
        f.write('<td>%s\n' % ua)
    f.write('</table>\n')
def getNodeText(node):
    """Recursively collect the text of a DOM subtree.

    Returns (t, offsets): the concatenated text, and a list of
    (text_node, length) pairs so a character offset into t can be mapped
    back to the DOM text node containing it.  Subtrees we previously
    annotated (class contains 'testrefs') are skipped so re-runs don't
    match their own output.
    """
    t, offsets = '', []

    # Skip over any previous annotations we added
    if node.nodeType == node.ELEMENT_NODE and 'testrefs' in node.getAttribute('class').split(' '):
        return t, offsets

    if node.nodeType == node.TEXT_NODE:
        val = node.nodeValue
        val = val.replace(unichr(0xa0), ' ') # replace non-breaking spaces
        t += val
        # NOTE: the offset records the *original* nodeValue length, which can
        # differ from len(val) only if the replacement changed lengths (it
        # doesn't: U+00A0 -> ' ' is 1:1).
        offsets += [ (node, len(node.nodeValue)) ]

    for n in node.childNodes:
        child_t, child_offsets = getNodeText(n)
        t += child_t
        offsets += child_offsets
    return t, offsets
def htmlSerializer(element):
    """Serialize a DOM (sub)tree to an HTML5 string.

    Handles the HTML-specific cases minidom's XML serializer gets wrong:
    raw-text elements (script/style/...) are emitted unescaped, and void
    elements (br/img/...) get no closing tag.
    """
    element.normalize()
    rv = []
    # Elements whose text content must not be entity-escaped.
    specialtext = ['style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript']
    # Void elements: never emit children or a closing tag.
    empty = ['area', 'base', 'basefont', 'bgsound', 'br', 'col', 'embed', 'frame',
             'hr', 'img', 'input', 'link', 'meta', 'param', 'spacer', 'wbr']

    def serializeElement(element):
        if element.nodeType == Node.DOCUMENT_TYPE_NODE:
            rv.append("<!DOCTYPE %s>" % element.name)
        elif element.nodeType == Node.DOCUMENT_NODE:
            for child in element.childNodes:
                serializeElement(child)
        elif element.nodeType == Node.COMMENT_NODE:
            rv.append("<!--%s-->" % element.nodeValue)
        elif element.nodeType == Node.TEXT_NODE:
            # Escape text unless some ancestor is a raw-text element.
            unescaped = False
            n = element.parentNode
            while n is not None:
                if n.nodeName in specialtext:
                    unescaped = True
                    break
                n = n.parentNode
            if unescaped:
                rv.append(element.nodeValue)
            else:
                rv.append(escapeHTML(element.nodeValue))
        else:
            rv.append("<%s" % element.nodeName)
            if element.hasAttributes():
                for name, value in element.attributes.items():
                    rv.append(' %s="%s"' % (name, escapeHTML(value)))
            rv.append(">")
            if element.nodeName not in empty:
                for child in element.childNodes:
                    serializeElement(child)
                rv.append("</%s>" % element.nodeName)
    serializeElement(element)
    return '<!DOCTYPE html>\n' + ''.join(rv)
def write_annotated_spec():
    """Annotate the spec copy with links from each assertion to its tests.

    Loads the XHTML spec, locates each spec assertion's sentence via its
    regexp pattern, splices a <span class="testrefs"> of test links in at
    the pattern's marked position, and writes the result back out as HTML.
    """
    # Load the stripped-down XHTMLised copy of the spec
    doc = xml.dom.minidom.parse(open('current-work-canvas.xhtml', 'r'))

    # Insert our new stylesheet
    n = doc.getElementsByTagName('head')[0].appendChild(doc.createElement('link'))
    n.setAttribute('rel', 'stylesheet')
    n.setAttribute('href', '../common/canvas-spec.css' if W3CMODE else '../spectest.css')
    n.setAttribute('type', 'text/css')

    # Compile each assertion's text into a searchable regexp, extracting its
    # conformance keyword (must/should/required) along the way.
    spec_assertion_patterns = []
    for a in spec_assertions:
        # Warn about problems
        if a['id'] not in spec_refs:
            print "Unused spec statement %s" % a['id']

        pattern_text = a['text']

        if 'keyword' in a:
            # Explicit keyword override
            keyword = a['keyword']
        else:
            # Extract the marked keywords, and remove the markers
            keyword = 'none'
            for kw in ['must', 'should', 'required']:
                if ('*%s*' % kw) in pattern_text:
                    keyword = kw
                    pattern_text = pattern_text.replace('*%s*' % kw, kw)
                    break
        # Make sure there wasn't >1 keyword
        for kw in ['must', 'should', 'required']:
            assert('*%s*' % kw not in pattern_text)

        # Convert the special pattern format into regexp syntax
        pattern_text = (pattern_text.
            # Escape relevant characters
            replace('*', r'\*').
            replace('+', r'\+').
            replace('.', r'\.').
            replace('(', r'\(').
            replace(')', r'\)').
            replace('[', r'\[').
            replace(']', r'\]').
            # Convert special sequences back into unescaped regexp code
            replace(' ', r'\s+').
            replace(r'<\.\.\.>', r'.+').
            replace('<^>', r'()').
            replace('<eol>', r'\s*?\n')
        )
        pattern = re.compile(pattern_text, re.S)
        spec_assertion_patterns.append( (a['id'], pattern, keyword, a.get('previously', None)) )
    matched_assertions = {}

    def process_element(e):
        # Recurse straight through implementation-only containers so their
        # text doesn't get matched as a single unit.
        if e.nodeType == e.ELEMENT_NODE and (e.getAttribute('class') == 'impl' or e.hasAttribute('data-component')):
            for c in e.childNodes:
                process_element(c)
            return

        t, offsets = getNodeText(e)
        for id, pattern, keyword, previously in spec_assertion_patterns:
            m = pattern.search(t)
            if m:
                # When the pattern-match isn't enough to uniquely identify a sentence,
                # allow explicit back-references to earlier paragraphs
                if previously:
                    if len(previously) >= 3:
                        n, text, exp = previously
                    else:
                        n, text = previously
                        exp = True
                    node = e
                    # Walk n siblings back and test whether that paragraph's
                    # text contains (or, per exp, doesn't contain) the marker.
                    while n and node.previousSibling:
                        node = node.previousSibling
                        n -= 1
                    if (text not in getNodeText(node)[0]) == exp:
                        continue # discard this match

                if id in matched_assertions:
                    print "Spec statement %s matches multiple places" % id
                matched_assertions[id] = True

                if m.lastindex != 1:
                    print "Spec statement %s has incorrect number of match groups" % id

                # Map the match group's end offset back to the specific DOM
                # text node (and offset within it) where links are inserted.
                end = m.end(1)
                end_node = None
                for end_node, o in offsets:
                    if end < o:
                        break
                    end -= o
                assert(end_node)

                # Build the annotation span: '#' anchor plus one link per test.
                n1 = doc.createElement('span')
                n1.setAttribute('class', 'testrefs kw-%s' % keyword)
                n1.setAttribute('id', 'testrefs.%s' % id)
                n1.appendChild(doc.createTextNode(' '))

                n = n1.appendChild(doc.createElement('a'))
                n.setAttribute('href', '#testrefs.%s' % id)
                n.setAttribute('title', id)
                n.appendChild(doc.createTextNode('#'))

                n1.appendChild(doc.createTextNode(' '))
                for test_id in spec_refs.get(id, []):
                    n = n1.appendChild(doc.createElement('a'))
                    n.setAttribute('href', '../canvas/%s.html' % test_id)
                    n.appendChild(doc.createTextNode(test_id))
                    n1.appendChild(doc.createTextNode(' '))

                # Split the text node at 'end' and insert the span between
                # the two halves.
                n0 = doc.createTextNode(end_node.nodeValue[:end])
                n2 = doc.createTextNode(end_node.nodeValue[end:])

                p = end_node.parentNode
                p.replaceChild(n2, end_node)
                p.insertBefore(n1, n2)
                p.insertBefore(n0, n1)

                # Re-read the (now modified) text so later patterns see
                # fresh offsets.
                t, offsets = getNodeText(e)

    for e in doc.getElementsByTagName('body')[0].childNodes:
        process_element(e)

    for s in spec_assertions:
        if s['id'] not in matched_assertions:
            print "Annotation incomplete: Unmatched spec statement %s" % s['id']

    # Convert from XHTML back to HTML
    doc.documentElement.removeAttribute('xmlns')
    doc.documentElement.setAttribute('lang', doc.documentElement.getAttribute('xml:lang'))

    head = doc.documentElement.getElementsByTagName('head')[0]
    head.insertBefore(doc.createElement('meta'), head.firstChild).setAttribute('charset', 'UTF-8')

    f = codecs.open('%s/canvas.html' % SPECOUTPUTDIR, 'w', 'utf-8')
    f.write(htmlSerializer(doc))
# Developer-facing index/report pages are Mozilla-mode only; the annotated
# spec copy is produced in both modes.
if not W3CMODE:
    write_index()
    write_category_indexes()
    write_reportgen()
    write_results()
write_annotated_spec()
#
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# test_pwc.py - This file is part of the PySptools package.
#
"""
The following function is tested:
bilateral
"""
from __future__ import print_function
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import pysptools.sigproc as sig
def tests():
    """Smoke-test sigproc.bilateral on the 'dnagwas' sample signal.

    Reads the input signal from $PYSPTOOLS_DATA/dnagwas.txt, runs the
    bilateral filter, and saves a plot of the raw and filtered signals to
    $HOME/results/dnagwas.png.

    Fixes vs. the original: the result directory was existence-checked and
    created twice (with the non-idiomatic '== False'), and the input file
    handle was never closed.
    """
    plt.ioff()  # batch mode: don't pop up an interactive window
    data_path = os.environ['PYSPTOOLS_DATA']
    home = os.environ['HOME']
    result_path = osp.join(home, 'results')
    # Create the output directory once, up front.
    if not osp.exists(result_path):
        os.makedirs(result_path)
    # Close the data file deterministically instead of leaking the handle.
    with open(osp.join(data_path, 'dnagwas.txt')) as fin:
        signal = [float(x) for x in fin.readlines()]
    z = sig.bilateral(np.array(signal), 0, 10, 25, display=1, maxiter=5)
    plt.plot(signal)
    plt.plot(z, color='r')
    plt.savefig(osp.join(result_path, 'dnagwas.png'))
# Script entry point: report the interpreter version, then run the test.
if __name__ == '__main__':
    import sys
    print(sys.version_info)
    tests()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os.path
import StringIO
import sys
import markdown
import yaml
# config
CHANGE_KEYS = ('new', 'changed', 'fixed')  # change categories, in output order
#DESTINATION = '../html'
DESTINATION = '/Users/naotaka/Development/GAE/data/clipmenu'  # GAE data root
FILENAME = 'versionhistory/index.txt'  # per-locale output path under DESTINATION/<locale>/
#INPUT_PATH = '../VersionHistory-en.yaml'
#INPUT_PATH = '../VersionHistory-ja.yaml'
DOWNLOAD_DIR = 'https://dl.dropbox.com/u/1140644/clipmenu/'  # where the .dmg files live

# path
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
#basename = os.path.splitext(os.path.basename(INPUT_PATH))[0]
#dest_path = os.path.join(CURRENT_DIR, DESTINATION, basename + '.html')
#yaml_path = os.path.join(CURRENT_DIR, INPUT_PATH)
def format_date(d, locale):
    """Render a localized release-date line for date *d*.

    'ja' gets the Japanese label and date format; anything else gets the
    English form.
    """
    if locale != 'ja':
        return 'Release Date: %s' % d.strftime("%b %d, %Y")
    return u'更新日: %s' % d.strftime("%Y年%m月%d日").decode('utf8')
def make_download_link(version, locale):
    """Return a Markdown download link for ClipMenu_<version>.dmg,
    with the link text localized for 'ja'."""
    url = '%sClipMenu_%s.dmg' % (DOWNLOAD_DIR, version)
    link_text = u'ダウンロード' if locale == 'ja' else 'Download'
    return u'[%s](%s "%s")' % (link_text, url, link_text)
# main
# Usage: script.py VersionHistory-<locale>.yaml
# Validates the argument, renders each release as a Markdown bullet list,
# and writes the result to DESTINATION/<locale>/FILENAME as UTF-8.
if len(sys.argv) != 2:
    print "変換するYAMLファイルを指定して下さい"
    exit()
arg = sys.argv[1]
name, ext = os.path.splitext(os.path.basename(arg))
if ext.lower() != '.yaml':
    print "'.yaml'の拡張子を持ったファイルを指定して下さい。"
    exit()

#basename = os.path.splitext(os.path.basename(arg))[0]
#dest_path = os.path.join(CURRENT_DIR, DESTINATION, basename + '.txt')
# The locale is encoded in the file name: VersionHistory-<locale>.yaml
locale = name.split('-')[1]
dest_path = os.path.join(DESTINATION, locale, FILENAME)

# load YAML (mapping: version -> {date, changes})
data = None
with file(arg, 'r') as f:
    data = yaml.load(f)

# Retrieve data - newest version first.
string = StringIO.StringIO()
for item in sorted(data.items(), reverse=True):
    version, content = item
    download_link = make_download_link(version, locale)
    updated = format_date(content['date'], locale)
    changes = content.get('changes')
    string.write('* #### %s\n' % (version,))
    string.write('\t* %s\n' % download_link)
    string.write('\t* %s\n' % updated)
    if changes:
        # Emit change categories in the fixed CHANGE_KEYS order.
        for key in CHANGE_KEYS:
            change = changes.get(key)
            if not change:
                continue
            string.write('\t* [%s]\n' % key)
            for u in change:
                string.write('\t\t* %s\n' % u)
    string.write('\n')

#html = markdown.markdown(string.getvalue())
text = string.getvalue()
string.close()

# Write the Markdown text out as UTF-8.
with file(dest_path, 'w') as f:
    f.write(text.encode('utf8'))
    #yaml.dump(data, f, encoding='utf8', allow_unicode=True, default_flow_style=False)
#! /usr/bin/env python
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
import re
import struct
import binascii
# Public API of this module.
__all__ = [
    # Legacy interface exports traditional RFC 1521 Base64 encodings
    'encode', 'decode', 'encodestring', 'decodestring',
    # Generalized interface for other encodings
    'b64encode', 'b64decode', 'b32encode', 'b32decode',
    'b16encode', 'b16decode',
    # Standard Base64 encoding
    'standard_b64encode', 'standard_b64decode',
    # Some common Base64 alternatives.  As referenced by RFC 3458, see thread
    # starting at:
    #
    # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
    'urlsafe_b64encode', 'urlsafe_b64decode',
    ]
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
translation = _translation[:]
for k, v in altchars.items():
translation[ord(k)] = v
return s.translate(''.join(translation))
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
    """Encode a string using Base64.

    s is the string to encode.  Optional altchars must be a string of at
    least length 2 (additional characters are ignored) which specifies an
    alternative alphabet for the '+' and '/' characters.  This allows an
    application to e.g. generate url or filesystem safe Base64 strings.

    The encoded string is returned.
    """
    # binascii appends a trailing newline; drop it.
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is None:
        return encoded
    return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
def b64decode(s, altchars=None):
    """Decode a Base64 encoded string.

    s is the string to decode.  Optional altchars must be a string of at
    least length 2 (additional characters are ignored) which specifies the
    alternative alphabet used instead of the '+' and '/' characters.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in
    the string.
    """
    if altchars is not None:
        s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
    try:
        return binascii.a2b_base64(s)
    # 'except E as e' (2.6+) instead of the legacy 'except E, e' syntax,
    # which is a hard syntax error on Python 3.
    except binascii.Error as msg:
        # Transform this exception for consistency
        raise TypeError(msg)
def standard_b64encode(s):
    """Encode a string using the standard Base64 alphabet.

    s is the string to encode.  The encoded string is returned.
    """
    # Plain delegation: the default b64encode alphabet is the standard one.
    return b64encode(s)

def standard_b64decode(s):
    """Decode a string encoded with the standard Base64 alphabet.

    s is the string to decode.  The decoded string is returned.  A TypeError
    is raised if the string is incorrectly padded or if there are non-alphabet
    characters present in the string.
    """
    return b64decode(s)

def urlsafe_b64encode(s):
    """Encode a string using a url-safe Base64 alphabet.

    s is the string to encode.  The encoded string is returned.  The alphabet
    uses '-' instead of '+' and '_' instead of '/'.
    """
    return b64encode(s, '-_')

def urlsafe_b64decode(s):
    """Decode a string encoded with the url-safe Base64 alphabet.

    s is the string to decode.  The decoded string is returned.  A TypeError
    is raised if the string is incorrectly padded or if there are non-alphabet
    characters present in the string.

    The alphabet uses '-' instead of '+' and '_' instead of '/'.
    """
    return b64decode(s, '-_')
# Base32 encoding/decoding must be done in Python
# RFC 3548 Base32 alphabet: 5-bit value -> character.
_b32alphabet = {
    0: 'A',  9: 'J', 18: 'S', 27: '3',
    1: 'B', 10: 'K', 19: 'T', 28: '4',
    2: 'C', 11: 'L', 20: 'U', 29: '5',
    3: 'D', 12: 'M', 21: 'V', 30: '6',
    4: 'E', 13: 'N', 22: 'W', 31: '7',
    5: 'F', 14: 'O', 23: 'X',
    6: 'G', 15: 'P', 24: 'Y',
    7: 'H', 16: 'Q', 25: 'Z',
    8: 'I', 17: 'R', 26: '2',
    }

# _b32tab: list where index == 5-bit value, for encoding.
_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
# _b32rev: character -> 5-bit value (as long), for decoding.
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
def b32encode(s):
    """Encode a string using Base32.

    s is the string to encode.  The encoded string is returned.

    Processes the input in 40-bit (5-byte) quanta, emitting 8 characters of
    5 bits each; a short final quantum is zero-padded then the surplus
    output characters are replaced with '=' padding per RFC 3548.
    """
    parts = []
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s += ('\0' * (5 - leftover))
        quanta += 1
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide.  The intent of this
        # code is to process the 40 bits in units of 5 bits.  So we take the 1
        # leftover bit of c1 and tack it onto c2.  Then we take the 2 leftover
        # bits of c2 and tack them onto c3.  The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
        c2 += (c1 & 1) << 16 # 17 bits wide
        c3 += (c2 & 3) << 8  # 10 bits wide
        parts.extend([_b32tab[c1 >> 11],         # bits 1 - 5
                      _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
                      _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
                      _b32tab[c2 >> 12],         # bits 16 - 20 (1 - 5)
                      _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
                      _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
                      _b32tab[c3 >> 5],          # bits 31 - 35 (1 - 5)
                      _b32tab[c3 & 0x1f],        # bits 36 - 40 (1 - 5)
                      ])
    encoded = EMPTYSTRING.join(parts)
    # Adjust for any leftover partial quanta
    if leftover == 1:
        return encoded[:-6] + '======'
    elif leftover == 2:
        return encoded[:-4] + '===='
    elif leftover == 3:
        return encoded[:-3] + '==='
    elif leftover == 4:
        return encoded[:-1] + '='
    return encoded
def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
    (oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el).  The optional argument map01 when not None,
    specifies which letter the digit 1 should be mapped to (when map01 is not
    None, the digit 0 is always mapped to the letter O).  For security
    purposes the default is None, so that 0 and 1 are not allowed in the
    input.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    # Valid Base32 is always a whole number of 8-character quanta.
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise TypeError('Incorrect padding')
    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
    # False, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
    if map01:
        s = _translate(s, {'0': 'O', '1': map01})
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right.  We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = 0
    mo = re.search('(?P<pad>[=]*)$', s)
    if mo:
        padchars = len(mo.group('pad'))
        if padchars > 0:
            s = s[:-padchars]
    # Now decode the full quanta: accumulate 8 characters (5 bits each) into
    # a 40-bit integer, then emit it as 5 bytes via hex.
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        acc += _b32rev[c] << shift
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify('%010x' % acc))
            acc = 0
            shift = 35
    # Process the last, partial quanta: the pad-character count determines
    # how many of the final 5 bytes are real data (RFC 3548 table).
    last = binascii.unhexlify('%010x' % acc)
    if padchars == 0:
        last = ''                       # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise TypeError('Incorrect padding')
    parts.append(last)
    return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase.  The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
    """Encode a string using Base16.

    s is the string to encode.  The encoded string is returned.
    """
    # hexlify emits lowercase; RFC 3548 Base16 is uppercase.
    hexed = binascii.hexlify(s)
    return hexed.upper()
def b16decode(s, casefold=False):
    """Decode a Base16 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying
    whether a lowercase alphabet is acceptable as input.  For security
    purposes, the default is False.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present
    in the string.
    """
    if casefold:
        s = s.upper()
    # Reject anything outside the uppercase Base16 alphabet up front,
    # so lowercase input is only accepted when casefold is requested.
    if re.search('[^0-9A-F]', s) is not None:
        raise TypeError('Non-base16 digit found')
    return binascii.unhexlify(s)
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
    """Encode a file: read binary data from input, write base64 lines to output."""
    while True:
        chunk = input.read(MAXBINSIZE)
        if not chunk:
            break
        # Top up a short read so that every output line except the last
        # encodes a full MAXBINSIZE-byte quantum.
        while len(chunk) < MAXBINSIZE:
            more = input.read(MAXBINSIZE - len(chunk))
            if not more:
                break
            chunk = chunk + more
        output.write(binascii.b2a_base64(chunk))
def decode(input, output):
    """Decode a file: read base64 lines from input, write binary data to output."""
    while True:
        raw = input.readline()
        if not raw:
            break
        output.write(binascii.a2b_base64(raw))
def encodestring(s):
    """Encode a string into multiple lines of base-64 data."""
    pieces = [binascii.b2a_base64(s[i:i + MAXBINSIZE])
              for i in range(0, len(s), MAXBINSIZE)]
    return "".join(pieces)
def decodestring(s):
    """Decode a (possibly multi-line) base-64 string.

    Inverse of encodestring().
    """
    return binascii.a2b_base64(s)
# Useable as a script...
def test():
    """Small test program.

    Command-line driver: -e encodes (default), -d/-u decode, -t runs a
    round-trip self test; reads the named file argument or stdin.
    NOTE: Python 2 syntax (print statement, old except clause).
    """
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        # Bad options: print usage to stderr and exit non-zero.
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode  # -u is accepted as an alias for -d
        if o == '-t': test1(); return
    # '-' (or no file argument) means filter stdin to stdout.
    if args and args[0] != '-':
        func(open(args[0], 'rb'), sys.stdout)
    else:
        func(sys.stdin, sys.stdout)
def test1():
    """Round-trip self test: encode then decode a known string and print all three."""
    s0 = "Aladdin:open sesame"
    s1 = encodestring(s0)
    s2 = decodestring(s1)
    print s0, repr(s1), s2
if __name__ == '__main__':
    test()
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from lxml import etree
class account_print_journal(osv.osv_memory):
    """Wizard that prints journal entries, optionally restricted to the
    sale/purchase journals through the ``sale_purchase_only`` context key."""
    _inherit = "account.common.journal.report"
    _name = 'account.print.journal'
    _description = 'Account Print Journal'
    _columns = {
        'sort_selection': fields.selection([('l.date', 'Date'),
                                            ('am.name', 'Journal Entry Number'),],
                                           'Entries Sorted by', required=True),
        'journal_ids': fields.many2many('account.journal', 'account_print_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    _defaults = {
        'sort_selection': 'am.name',
        'filter': 'filter_period',
        'journal_ids': False,
    }

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        '''
        Set the domain on the 'journal_ids' field: only propose the journals of
        type sale/purchase (+refund) when the key 'sale_purchase_only' is
        present in the context, and exclude them otherwise.
        '''
        if context is None:
            context = {}
        result = super(account_print_journal, self).fields_view_get(
            cr, uid, view_id=view_id, view_type=view_type, context=context,
            toolbar=toolbar, submenu=submenu)
        arch = etree.XML(result['arch'])
        if context.get('sale_purchase_only'):
            journal_domain = "[('type', 'in', ('sale','purchase','sale_refund','purchase_refund'))]"
        else:
            journal_domain = "[('type', 'not in', ('sale','purchase','sale_refund','purchase_refund'))]"
        for field_node in arch.xpath("//field[@name='journal_ids']"):
            field_node.set('domain', journal_domain)
        result['arch'] = etree.tostring(arch)
        return result

    def _print_report(self, cr, uid, ids, data, context=None):
        """Launch the journal report matching the wizard's context."""
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        wizard_values = self.read(cr, uid, ids, ['sort_selection'], context=context)[0]
        data['form'].update(wizard_values)
        if context.get('sale_purchase_only'):
            report_name = 'account.report_salepurchasejournal'
        else:
            report_name = 'account.report_journal'
        return self.pool['report'].get_action(cr, uid, [], report_name, data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2015 Xilinx Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys
import urlparse
from hopper.utils.logger import *
import hopper.utils.args
import hopper.utils.Proxy
import hopper.utils.tasks
class CommandHopperBase(hopper.utils.args.CommandBase):
	"""Base class for hopper commands that need an execution Environment.

	Resolves the worker-thread count, git mirror path, local layer path
	and HTTP proxy from command-line options or environment variables
	before the concrete command runs.
	"""

	threadLimit = hopper.utils.args.ValueOption(
			None, "threads",
			default = None,
			description = "The maximum number of concurrent threads available.\n" +
				"(Default is to automatically detect)")

	mirror = hopper.utils.args.ValueOption(
			None, "mirror",
			default = None,
			description = "The location of a git repository mirror. These repositories will be used to seed the clones.\n" +
				"(This can be defined via the environment variable HOPPER_MIRROR.)")

	locallayers = hopper.utils.args.ValueOption(
			None, "local-layers",
			default = None,
			description = "The location of layers for which are local and can be symlinked to. This is useful for development.\n" +
				"(This can be defined via the environment variable HOPPER_LOCAL.)")

	def __init__(self):
		hopper.utils.args.CommandBase.__init__(self)
		self.environment = None

	def execute(self, handler = None):
		"""Build self.environment from the options/environment, then return True."""
		hopper.utils.args.CommandBase.execute(self)
		if self.threadLimit:
			threads = self.threadLimit
		else:
			threads = CommandHopperBase.getDefaultThreads()
		self.environment = hopper.utils.tasks.Environment(
				basepath = os.getcwd(),
				mirrorpath = CommandHopperBase.valueOrEnvironment(self.mirror, "HOPPER_MIRROR"),
				proxy = CommandHopperBase.getProxy(),
				threads = threads,
				locallayers = CommandHopperBase.valueOrEnvironment(self.locallayers, "HOPPER_LOCAL"))
		return True

	@staticmethod
	def valueOrEnvironment(value, env):
		"""Return value if truthy, else the environment variable env, else None."""
		if value:
			return value
		elif env in os.environ:
			return os.environ[env]
		return None

	@staticmethod
	def getDefaultThreads():
		"""Pick a default worker count.

		Uses half the hardware threads (never fewer than one); when
		running under LSF, the job's slot count overrides the detected
		value.
		"""
		import multiprocessing
		systemthreads = multiprocessing.cpu_count()
		# '//' keeps integer semantics on Python 3 too; max(1, ...)
		# avoids returning zero workers on a single-CPU machine.
		activecpus = max(1, systemthreads // 2)
		debug("Detected %s threads available to system (using half, %s threads)" % (systemthreads, activecpus))
		# Check if using LSF and account for it
		if "LSB_DJOB_NUMPROC" in os.environ:
			try:
				activecpus = int(os.environ["LSB_DJOB_NUMPROC"])
				warning("Forced default threads by LSF environment to %s threads" % activecpus)
			except ValueError:
				# Malformed LSB_DJOB_NUMPROC value; keep the detected
				# default rather than swallowing every exception.
				pass
		return activecpus

	@staticmethod
	def getHttpProxyUri():
		"""Return the parsed http_proxy/HTTP_PROXY URI, or None if unset."""
		if "http_proxy" in os.environ:
			return urlparse.urlparse(os.environ["http_proxy"])
		elif "HTTP_PROXY" in os.environ:
			return urlparse.urlparse(os.environ["HTTP_PROXY"])
		return None

	@staticmethod
	def getProxy():
		"""Return a Proxy built from the proxy environment variables, or None."""
		uri = CommandHopperBase.getHttpProxyUri()
		if uri:
			return hopper.utils.Proxy.Proxy(uri.hostname, uri.port)
		return None
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from .go_benchmark import Benchmark
class FreudensteinRoth(Benchmark):
    r"""
    FreudensteinRoth objective function.

    This class defines the Freudenstein & Roth [1]_ global optimization
    problem, a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{FreudensteinRoth}}(x) = \left\{x_1 - 13 + \left[(5 - x_2) x_2
        - 2 \right] x_2 \right\}^2 + \left \{x_1 - 29
        + \left[(x_2 + 1) x_2 - 14 \right] x_2 \right\}^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [5, 4]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-3, 3), (-5, 5)]
        self.global_optimum = [[5.0, 4.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        # Count this objective-function evaluation.
        self.nfev += 1

        v = x[1]
        first = (-13.0 + x[0] + ((5.0 - v) * v - 2.0) * v) ** 2
        second = (-29.0 + x[0] + ((v + 1.0) * v - 14.0) * v) ** 2
        return first + second
#!/usr/bin/env python3
#
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Download or build previous releases.
# Needs curl and tar to download a release, or the build dependencies when
# building a release.
import argparse
import contextlib
from fnmatch import fnmatch
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import hashlib
# Whitelist of release tarballs that may be downloaded, keyed by their
# SHA-256 digest; the value is the expected tarball file name.
SHA256_SUMS = {
"d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "bitcoin-0.15.2-aarch64-linux-gnu.tar.gz",
"54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "bitcoin-0.15.2-arm-linux-gnueabihf.tar.gz",
"2b843506c3f1af0eeca5854a920264f9a829f02d0d50328005950ddcbe88874d": "bitcoin-0.15.2-i686-pc-linux-gnu.tar.gz",
"87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "bitcoin-0.15.2-osx64.tar.gz",
"566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "bitcoin-0.15.2-x86_64-linux-gnu.tar.gz",
"0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "bitcoin-0.16.3-aarch64-linux-gnu.tar.gz",
"fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "bitcoin-0.16.3-arm-linux-gnueabihf.tar.gz",
"75a537844313b0a84bdb61ffcdc5c4ce19a738f7ddf71007cd2edf664efd7c37": "bitcoin-0.16.3-i686-pc-linux-gnu.tar.gz",
"78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "bitcoin-0.16.3-osx64.tar.gz",
"5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "bitcoin-0.16.3-x86_64-linux-gnu.tar.gz",
"5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "bitcoin-0.17.2-aarch64-linux-gnu.tar.gz",
"d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "bitcoin-0.17.2-arm-linux-gnueabihf.tar.gz",
"d295fc93f39bbf0fd937b730a93184899a2eb6c3a6d53f3d857cbe77ef89b98c": "bitcoin-0.17.2-i686-pc-linux-gnu.tar.gz",
"a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "bitcoin-0.17.2-osx64.tar.gz",
"943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "bitcoin-0.17.2-x86_64-linux-gnu.tar.gz",
"88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "bitcoin-0.18.1-aarch64-linux-gnu.tar.gz",
"cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "bitcoin-0.18.1-arm-linux-gnueabihf.tar.gz",
"989e847b3e95fc9fedc0b109cae1b4fa43348f2f712e187a118461876af9bd16": "bitcoin-0.18.1-i686-pc-linux-gnu.tar.gz",
"b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "bitcoin-0.18.1-osx64.tar.gz",
"425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "bitcoin-0.18.1-riscv64-linux-gnu.tar.gz",
"600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "bitcoin-0.18.1-x86_64-linux-gnu.tar.gz",
"3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "bitcoin-0.19.1-aarch64-linux-gnu.tar.gz",
"657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "bitcoin-0.19.1-arm-linux-gnueabihf.tar.gz",
"10d1e53208aa7603022f4acc084a046299ab4ccf25fe01e81b3fb6f856772589": "bitcoin-0.19.1-i686-pc-linux-gnu.tar.gz",
"1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "bitcoin-0.19.1-osx64.tar.gz",
"aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "bitcoin-0.19.1-riscv64-linux-gnu.tar.gz",
"5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "bitcoin-0.19.1-x86_64-linux-gnu.tar.gz"
}
@contextlib.contextmanager
def pushd(new_dir):
    """Context manager: chdir into *new_dir*, restoring the previous
    working directory on exit (even if the body raises).

    Note: the original carried a misleading ``-> None`` annotation; a
    @contextmanager-decorated generator returns a context manager, so the
    annotation has been dropped.
    """
    previous_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(previous_dir)
def download_binary(tag, args) -> int:
    """Download, verify and unpack the release binary tarball for *tag*.

    The tarball is fetched from bitcoincore.org into the current working
    directory, checked against the SHA256_SUMS whitelist, and extracted
    into a directory named after the tag.  Returns 0 on success, 1 on a
    missing tag or checksum mismatch, or curl/tar's exit status.
    """
    if Path(tag).is_dir():
        # A cached directory is reused unless --remove-dir was given.
        if not args.remove_dir:
            print('Using cached {}'.format(tag))
            return 0
        shutil.rmtree(tag)
    Path(tag).mkdir()
    bin_path = 'bin/bitcoin-core-{}'.format(tag[1:])
    # Release candidates (vX.Y.ZrcN) are published under a "test." subdir.
    match = re.compile('v(.*)(rc[0-9]+)$').search(tag)
    if match:
        bin_path = 'bin/bitcoin-core-{}/test.{}'.format(
            match.group(1), match.group(2))
    tarball = 'bitcoin-{tag}-{platform}.tar.gz'.format(
        tag=tag[1:], platform=args.platform)
    tarballUrl = 'https://bitcoincore.org/{bin_path}/{tarball}'.format(
        bin_path=bin_path, tarball=tarball)
    print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl))
    # Probe with a HEAD request first so an unknown tag fails fast
    # without downloading anything.
    header, status = subprocess.Popen(
        ['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate()
    if re.search("404 Not Found", header.decode("utf-8")):
        print("Binary tag was not found")
        return 1
    curlCmds = [
        ['curl', '--remote-name', tarballUrl]
    ]
    for cmd in curlCmds:
        ret = subprocess.run(cmd).returncode
        if ret:
            return ret
    # Verify the download: both the digest and the file name must match
    # an entry in the SHA256_SUMS whitelist.
    hasher = hashlib.sha256()
    with open(tarball, "rb") as afile:
        hasher.update(afile.read())
    tarballHash = hasher.hexdigest()
    if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
        print("Checksum did not match")
        return 1
    print("Checksum matched")
    # Extract tarball
    ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
                          '--strip-components=1',
                          'bitcoin-{tag}'.format(tag=tag[1:])]).returncode
    if ret:
        return ret
    Path(tarball).unlink()
    return 0
def build_release(tag, args) -> int:
    """Clone, check out and compile the given release *tag* from GitHub.

    Optionally builds the depends tree first (args.depends).  The
    resulting bitcoind/bitcoin-cli/bitcoin-tx binaries are moved into a
    bin/ subdirectory to mirror the layout of a release download.
    Returns 0 on success, otherwise the failing subprocess's status.
    """
    githubUrl = "https://github.com/bitcoin/bitcoin"
    if args.remove_dir:
        if Path(tag).is_dir():
            shutil.rmtree(tag)
    if not Path(tag).is_dir():
        # fetch new tags
        subprocess.run(
            ["git", "fetch", githubUrl, "--tags"])
        # Verify the tag exists locally before attempting a clone.
        output = subprocess.check_output(['git', 'tag', '-l', tag])
        if not output:
            print('Tag {} not found'.format(tag))
            return 1
    ret = subprocess.run([
        'git', 'clone', githubUrl, tag
    ]).returncode
    if ret:
        return ret
    with pushd(tag):
        ret = subprocess.run(['git', 'checkout', tag]).returncode
        if ret:
            return ret
        host = args.host
        if args.depends:
            # Build the dependency tree (without Qt) and let it report
            # the host triplet unless $HOST overrides it.
            with pushd('depends'):
                ret = subprocess.run(['make', 'NO_QT=1']).returncode
                if ret:
                    return ret
                host = os.environ.get(
                    'HOST', subprocess.check_output(['./config.guess']))
        config_flags = '--prefix={pwd}/depends/{host} '.format(
            pwd=os.getcwd(),
            host=host) + args.config_flags
        cmds = [
            './autogen.sh',
            './configure {}'.format(config_flags),
            'make',
        ]
        for cmd in cmds:
            ret = subprocess.run(cmd.split()).returncode
            if ret:
                return ret
        # Move binaries, so they're in the same place as in the
        # release download
        Path('bin').mkdir(exist_ok=True)
        files = ['bitcoind', 'bitcoin-cli', 'bitcoin-tx']
        for f in files:
            Path('src/'+f).rename('bin/'+f)
    return 0
def check_host(args) -> int:
    """Resolve the build host triplet and, for binary downloads, the
    release platform name.

    Sets ``args.host`` from $HOST, falling back to ./depends/config.guess,
    and — when args.download_binary is set — maps it onto ``args.platform``.
    Returns 0 on success, 1 if no release binary exists for this host.

    Fix: the original passed ``subprocess.check_output(...)`` as the
    *default* of ``os.environ.get``, which evaluates eagerly — so
    config.guess was executed (and had to exist) even when $HOST was set.
    The fallback is now lazy.
    """
    host = os.environ.get('HOST')
    if host is None:
        host = subprocess.check_output('./depends/config.guess').decode()
    args.host = host
    if args.download_binary:
        platforms = {
            'aarch64-*-linux*': 'aarch64-linux-gnu',
            'x86_64-*-linux*': 'x86_64-linux-gnu',
            'x86_64-apple-darwin*': 'osx64',
        }
        args.platform = ''
        for pattern, target in platforms.items():
            if fnmatch(args.host, pattern):
                args.platform = target
        if not args.platform:
            print('Not sure which binary to download for {}'.format(args.host))
            return 1
    return 0
def main(args) -> int:
    """Prepare the releases directory, then fetch or build each tag.

    Returns a shell-style exit status: 0 on success, or the first
    non-zero status reported by a download/build step.
    """
    Path(args.target_dir).mkdir(exist_ok=True, parents=True)
    print("Releases directory: {}".format(args.target_dir))
    status = check_host(args)
    if status:
        return status
    if args.download_binary:
        handler = download_binary
    else:
        # Building from source: start from $CONFIG_FLAGS and disable the
        # components the functional tests never need.
        args.config_flags = os.environ.get('CONFIG_FLAGS', '')
        args.config_flags += ' --without-gui --disable-tests --disable-bench'
        handler = build_release
    with pushd(args.target_dir):
        for tag in args.tags:
            status = handler(tag, args)
            if status:
                return status
    return 0
if __name__ == '__main__':
    # Command-line driver: parse the options and delegate to main(),
    # propagating its status as the process exit code.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-r', '--remove-dir', action='store_true',
                        help='remove existing directory.')
    parser.add_argument('-d', '--depends', action='store_true',
                        help='use depends.')
    parser.add_argument('-b', '--download-binary', action='store_true',
                        help='download release binary.')
    parser.add_argument('-t', '--target-dir', action='store',
                        help='target directory.', default='releases')
    parser.add_argument('tags', nargs='+',
                        help="release tags. e.g.: v0.18.1 v0.20.0rc2")
    args = parser.parse_args()
    sys.exit(main(args))
//// [tests/cases/compiler/commentWithUnreasonableIndentationLevel01.ts] ////
// NOTE(review): auto-generated TypeScript compiler test baseline; regenerate
// via the compiler test harness rather than editing by hand.
//// [commentWithUnreasonableIndentationLevel01.ts]
// Repro from #41223
/**
 * This is a comment with dumb indentation for some auto-generated thing.
 */
export class SomeAutoGeneratedThing {}
//// [commentWithUnreasonableIndentationLevel01.js]
// Repro from #41223
/**
 * This is a comment with dumb indentation for some auto-generated thing.
 */
export class SomeAutoGeneratedThing {
}
#ifndef JEMALLOC_INTERNAL_PSSET_H
#define JEMALLOC_INTERNAL_PSSET_H
#include "jemalloc/internal/hpdata.h"
/*
* A page-slab set. What the eset is to PAC, the psset is to HPA. It maintains
* a collection of page-slabs (the intent being that they are backed by
* hugepages, or at least could be), and handles allocation and deallocation
* requests.
*/
/*
 * One more than the maximum pszind_t we will serve out of the HPA.
 * Practically, we expect only the first few to be actually used. This
 * corresponds to a maximum size of 512MB on systems with 4k pages and
 * SC_NGROUP == 4, which is already an unreasonably large maximum. Morally, you
 * can think of this as being SC_NPSIZES, but there's no sense in wasting that
 * much space in the arena, making bitmaps that much larger, etc.
 */
#define PSSET_NPSIZES 64

/*
 * We keep two purge lists per page size class; one for hugified hpdatas (at
 * index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind +
 * 1). This lets us implement a preference for purging non-hugified hpdatas
 * among similarly-dirty ones.
 * We reserve the last two indices for empty slabs, in that case purging
 * hugified ones (which are definitionally all waste) before non-hugified ones
 * (i.e. reversing the order).
 */
#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)
/* Summary statistics for one bucket of pageslabs. */
typedef struct psset_bin_stats_s psset_bin_stats_t;
struct psset_bin_stats_s {
	/* How many pageslabs are in this bin? */
	size_t npageslabs;
	/* Of them, how many pages are active? */
	size_t nactive;
	/* And how many are dirty? */
	size_t ndirty;
};
/* Aggregate psset statistics, bucketed by slab fullness class. */
typedef struct psset_stats_s psset_stats_t;
struct psset_stats_s {
	/*
	 * The second index is huge stats; nonfull_slabs[pszind][0] contains
	 * stats for the non-huge slabs in bucket pszind, while
	 * nonfull_slabs[pszind][1] contains stats for the huge slabs.
	 */
	psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2];

	/*
	 * Full slabs don't live in any edata heap, but we still track their
	 * stats.
	 */
	psset_bin_stats_t full_slabs[2];

	/* Empty slabs are similar. */
	psset_bin_stats_t empty_slabs[2];
};
/* The page-slab set itself; see the file-header comment for an overview. */
typedef struct psset_s psset_t;
struct psset_s {
	/*
	 * The pageslabs, quantized by the size class of the largest contiguous
	 * free run of pages in a pageslab.
	 */
	hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
	/* Bitmap for which set bits correspond to non-empty heaps. */
	fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
	/*
	 * The sum of all bin stats in stats. This lets us quickly answer
	 * queries for the number of dirty, active, and retained pages in the
	 * entire set.
	 */
	psset_bin_stats_t merged_stats;
	psset_stats_t stats;
	/*
	 * Slabs with no active allocations, but which are allowed to serve new
	 * allocations.
	 */
	hpdata_empty_list_t empty;
	/*
	 * Slabs which are available to be purged, ordered by how much we want
	 * to purge them (with later indices indicating slabs we want to purge
	 * more).
	 */
	hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS];
	/* Bitmap for which set bits correspond to non-empty purge lists. */
	fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)];
	/* Slabs which are available to be hugified. */
	hpdata_hugify_list_t to_hugify;
};
/* Initialize an empty psset. */
void psset_init(psset_t *psset);
/* Accumulate the counters of *src into *dst. */
void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);

/*
 * Begin or end updating the given pageslab's metadata. While the pageslab is
 * being updated, it won't be returned from psset_fit calls.
 */
void psset_update_begin(psset_t *psset, hpdata_t *ps);
void psset_update_end(psset_t *psset, hpdata_t *ps);

/* Analogous to the eset_fit; pick a hpdata to serve the request. */
hpdata_t *psset_pick_alloc(psset_t *psset, size_t size);
/* Pick one to purge. */
hpdata_t *psset_pick_purge(psset_t *psset);
/* Pick one to hugify. */
hpdata_t *psset_pick_hugify(psset_t *psset);

/* Add / remove a pageslab to / from the set. */
void psset_insert(psset_t *psset, hpdata_t *ps);
void psset_remove(psset_t *psset, hpdata_t *ps);
/* Total number of pageslabs currently tracked by the set. */
static inline size_t
psset_npageslabs(psset_t *psset) {
	return psset->merged_stats.npageslabs;
}
/* Total number of active pages across the whole set. */
static inline size_t
psset_nactive(psset_t *psset) {
	return psset->merged_stats.nactive;
}
/* Total number of dirty pages across the whole set. */
static inline size_t
psset_ndirty(psset_t *psset) {
	return psset->merged_stats.ndirty;
}
#endif /* JEMALLOC_INTERNAL_PSSET_H */ | c | github | https://github.com/redis/redis | deps/jemalloc/include/jemalloc/internal/psset.h |
/*!
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {REDIRECT_ROUTES} from './redirections';
import {Route} from '@angular/router';
describe('REDIRECT_ROUTES', () => {
it('should have all redirectTo values starting with a "/"', () => {
const checkRoutes = (routes: Route[]) => {
for (const route of routes) {
if (route.redirectTo) {
if (typeof route.redirectTo === 'string') {
expect(route.redirectTo.startsWith('/'))
.withContext(`Invalid redirectTo: ${route.redirectTo}`)
.toBe(true);
}
}
if (route.children) {
checkRoutes(route.children);
}
}
};
checkRoutes(REDIRECT_ROUTES);
});
}); | typescript | github | https://github.com/angular/angular | adev/src/app/routing/redirections.spec.ts |
/*-------------------------------------------------------------------------
*
* sha1.c
* Implements the SHA1 Secure Hash Algorithm
*
* Fallback implementation of SHA1, as specified in RFC 3174.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/common/sha1.c
*
*-------------------------------------------------------------------------
*/
/* $KAME: sha1.c,v 1.3 2000/02/22 14:01:18 itojun Exp $ */
/*
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
* based on: http://www.itl.nist.gov/fipspubs/fip180-1.htm
* implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
*/
#ifndef FRONTEND
#include "postgres.h"
#else
#include "postgres_fe.h"
#endif
#include <sys/param.h>
#include "sha1_int.h"
/* constant table */
static const uint32 _K[] = {0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6};

/* Round constant for step t: one constant per 20-step group. */
#define K(t) _K[(t) / 20]

/* The four SHA-1 round functions (FIPS 180-1), one per 20-step group. */
#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
#define F1(b, c, d) (((b) ^ (c)) ^ (d))
#define F2(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
#define F3(b, c, d) (((b) ^ (c)) ^ (d))

/* 32-bit left-rotate x by n bits. */
#define S(n, x) (((x) << (n)) | ((x) >> (32 - (n))))

/* Shorthands for the context's hash words, buffer offset, length and block. */
#define H(n) (ctx->h.b32[(n)])
#define COUNT (ctx->count)
#define BCOUNT (ctx->c.b64[0] / 8)
#define W(n) (ctx->m.b32[(n)])

/*
 * Append one byte to the message block, running a hash step whenever a
 * full 64-byte block accumulates.
 */
#define PUTPAD(x) \
do { \
	ctx->m.b8[(COUNT % 64)] = (x); \
	COUNT++; \
	COUNT %= 64; \
	if (COUNT % 64 == 0) \
		sha1_step(ctx); \
} while (0)
/*
 * sha1_step
 *
 * Run the SHA-1 compression function over the 64-byte block buffered in
 * ctx->m, folding the result into the intermediate hash ctx->h, then
 * wipe the block (FIPS 180-1 section 7).
 */
static void
sha1_step(pg_sha1_ctx *ctx)
{
	uint32		a,
				b,
				c,
				d,
				e;
	size_t		t,
				s;
	uint32		tmp;

#ifndef WORDS_BIGENDIAN
	/*
	 * SHA-1 reads the message as big-endian 32-bit words; on a
	 * little-endian host, byte-swap each word of the block in place
	 * before accessing it through the b32 view (the W() macro).
	 */
	pg_sha1_ctx tctx;

	memmove(&tctx.m.b8[0], &ctx->m.b8[0], 64);
	ctx->m.b8[0] = tctx.m.b8[3];
	ctx->m.b8[1] = tctx.m.b8[2];
	ctx->m.b8[2] = tctx.m.b8[1];
	ctx->m.b8[3] = tctx.m.b8[0];
	ctx->m.b8[4] = tctx.m.b8[7];
	ctx->m.b8[5] = tctx.m.b8[6];
	ctx->m.b8[6] = tctx.m.b8[5];
	ctx->m.b8[7] = tctx.m.b8[4];
	ctx->m.b8[8] = tctx.m.b8[11];
	ctx->m.b8[9] = tctx.m.b8[10];
	ctx->m.b8[10] = tctx.m.b8[9];
	ctx->m.b8[11] = tctx.m.b8[8];
	ctx->m.b8[12] = tctx.m.b8[15];
	ctx->m.b8[13] = tctx.m.b8[14];
	ctx->m.b8[14] = tctx.m.b8[13];
	ctx->m.b8[15] = tctx.m.b8[12];
	ctx->m.b8[16] = tctx.m.b8[19];
	ctx->m.b8[17] = tctx.m.b8[18];
	ctx->m.b8[18] = tctx.m.b8[17];
	ctx->m.b8[19] = tctx.m.b8[16];
	ctx->m.b8[20] = tctx.m.b8[23];
	ctx->m.b8[21] = tctx.m.b8[22];
	ctx->m.b8[22] = tctx.m.b8[21];
	ctx->m.b8[23] = tctx.m.b8[20];
	ctx->m.b8[24] = tctx.m.b8[27];
	ctx->m.b8[25] = tctx.m.b8[26];
	ctx->m.b8[26] = tctx.m.b8[25];
	ctx->m.b8[27] = tctx.m.b8[24];
	ctx->m.b8[28] = tctx.m.b8[31];
	ctx->m.b8[29] = tctx.m.b8[30];
	ctx->m.b8[30] = tctx.m.b8[29];
	ctx->m.b8[31] = tctx.m.b8[28];
	ctx->m.b8[32] = tctx.m.b8[35];
	ctx->m.b8[33] = tctx.m.b8[34];
	ctx->m.b8[34] = tctx.m.b8[33];
	ctx->m.b8[35] = tctx.m.b8[32];
	ctx->m.b8[36] = tctx.m.b8[39];
	ctx->m.b8[37] = tctx.m.b8[38];
	ctx->m.b8[38] = tctx.m.b8[37];
	ctx->m.b8[39] = tctx.m.b8[36];
	ctx->m.b8[40] = tctx.m.b8[43];
	ctx->m.b8[41] = tctx.m.b8[42];
	ctx->m.b8[42] = tctx.m.b8[41];
	ctx->m.b8[43] = tctx.m.b8[40];
	ctx->m.b8[44] = tctx.m.b8[47];
	ctx->m.b8[45] = tctx.m.b8[46];
	ctx->m.b8[46] = tctx.m.b8[45];
	ctx->m.b8[47] = tctx.m.b8[44];
	ctx->m.b8[48] = tctx.m.b8[51];
	ctx->m.b8[49] = tctx.m.b8[50];
	ctx->m.b8[50] = tctx.m.b8[49];
	ctx->m.b8[51] = tctx.m.b8[48];
	ctx->m.b8[52] = tctx.m.b8[55];
	ctx->m.b8[53] = tctx.m.b8[54];
	ctx->m.b8[54] = tctx.m.b8[53];
	ctx->m.b8[55] = tctx.m.b8[52];
	ctx->m.b8[56] = tctx.m.b8[59];
	ctx->m.b8[57] = tctx.m.b8[58];
	ctx->m.b8[58] = tctx.m.b8[57];
	ctx->m.b8[59] = tctx.m.b8[56];
	ctx->m.b8[60] = tctx.m.b8[63];
	ctx->m.b8[61] = tctx.m.b8[62];
	ctx->m.b8[62] = tctx.m.b8[61];
	ctx->m.b8[63] = tctx.m.b8[60];
#endif

	/* Load the current intermediate hash into the working variables. */
	a = H(0);
	b = H(1);
	c = H(2);
	d = H(3);
	e = H(4);

	/*
	 * 80 steps, 20 per round function.  The message schedule is kept in
	 * a rolling 16-word window (s = t & 0x0f); from step 16 on, W(s) is
	 * recomputed from earlier words per the FIPS 180-1 recurrence.
	 */
	for (t = 0; t < 20; t++)
	{
		s = t & 0x0f;
		if (t >= 16)
			W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
		tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
		e = d;
		d = c;
		c = S(30, b);
		b = a;
		a = tmp;
	}
	for (t = 20; t < 40; t++)
	{
		s = t & 0x0f;
		W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
		tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
		e = d;
		d = c;
		c = S(30, b);
		b = a;
		a = tmp;
	}
	for (t = 40; t < 60; t++)
	{
		s = t & 0x0f;
		W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
		tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
		e = d;
		d = c;
		c = S(30, b);
		b = a;
		a = tmp;
	}
	for (t = 60; t < 80; t++)
	{
		s = t & 0x0f;
		W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
		tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
		e = d;
		d = c;
		c = S(30, b);
		b = a;
		a = tmp;
	}

	/* Fold the working variables back into the intermediate hash. */
	H(0) = H(0) + a;
	H(1) = H(1) + b;
	H(2) = H(2) + c;
	H(3) = H(3) + d;
	H(4) = H(4) + e;

	/* Clear the consumed message block. */
	memset(&ctx->m.b8[0], 0, 64);
}
/*
 * sha1_pad
 *
 * Append the FIPS 180-1 padding: a 0x80 byte, zero bytes up to 8 bytes
 * short of a block boundary, then the 64-bit message bit-length.
 */
static void
sha1_pad(pg_sha1_ctx *ctx)
{
	size_t		padlen;			/* pad length in bytes */
	size_t		padstart;

	PUTPAD(0x80);

	padstart = COUNT % 64;
	padlen = 64 - padstart;
	if (padlen < 8)
	{
		/*
		 * Not enough room in this block for the 8-byte length field:
		 * zero-fill and hash it, then pad a fresh block.
		 */
		memset(&ctx->m.b8[padstart], 0, padlen);
		COUNT += padlen;
		COUNT %= 64;
		sha1_step(ctx);
		padstart = COUNT % 64;	/* should be 0 */
		padlen = 64 - padstart; /* should be 64 */
	}
	memset(&ctx->m.b8[padstart], 0, padlen - 8);
	COUNT += (padlen - 8);
	COUNT %= 64;
	/* Append the bit count (ctx->c) as a big-endian 64-bit integer. */
#ifdef WORDS_BIGENDIAN
	PUTPAD(ctx->c.b8[0]);
	PUTPAD(ctx->c.b8[1]);
	PUTPAD(ctx->c.b8[2]);
	PUTPAD(ctx->c.b8[3]);
	PUTPAD(ctx->c.b8[4]);
	PUTPAD(ctx->c.b8[5]);
	PUTPAD(ctx->c.b8[6]);
	PUTPAD(ctx->c.b8[7]);
#else
	PUTPAD(ctx->c.b8[7]);
	PUTPAD(ctx->c.b8[6]);
	PUTPAD(ctx->c.b8[5]);
	PUTPAD(ctx->c.b8[4]);
	PUTPAD(ctx->c.b8[3]);
	PUTPAD(ctx->c.b8[2]);
	PUTPAD(ctx->c.b8[1]);
	PUTPAD(ctx->c.b8[0]);
#endif
}
/*
 * sha1_result
 *
 * Copy the 20-byte digest out of ctx->h into digest0, emitting the hash
 * words in big-endian byte order regardless of host endianness.
 */
static void
sha1_result(uint8 *digest0, pg_sha1_ctx *ctx)
{
	uint8	   *digest;

	digest = digest0;

#ifdef WORDS_BIGENDIAN
	memmove(digest, &ctx->h.b8[0], 20);
#else
	/* Little-endian host: byte-swap each 32-bit hash word on output. */
	digest[0] = ctx->h.b8[3];
	digest[1] = ctx->h.b8[2];
	digest[2] = ctx->h.b8[1];
	digest[3] = ctx->h.b8[0];
	digest[4] = ctx->h.b8[7];
	digest[5] = ctx->h.b8[6];
	digest[6] = ctx->h.b8[5];
	digest[7] = ctx->h.b8[4];
	digest[8] = ctx->h.b8[11];
	digest[9] = ctx->h.b8[10];
	digest[10] = ctx->h.b8[9];
	digest[11] = ctx->h.b8[8];
	digest[12] = ctx->h.b8[15];
	digest[13] = ctx->h.b8[14];
	digest[14] = ctx->h.b8[13];
	digest[15] = ctx->h.b8[12];
	digest[16] = ctx->h.b8[19];
	digest[17] = ctx->h.b8[18];
	digest[18] = ctx->h.b8[17];
	digest[19] = ctx->h.b8[16];
#endif
}
/* External routines for this SHA1 implementation */
/*
* pg_sha1_init
*
* Initialize a SHA1 context.
*/
void
pg_sha1_init(pg_sha1_ctx *ctx)
{
	memset(ctx, 0, sizeof(pg_sha1_ctx));
	/* FIPS 180-1 initial hash values. */
	H(0) = 0x67452301;
	H(1) = 0xefcdab89;
	H(2) = 0x98badcfe;
	H(3) = 0x10325476;
	H(4) = 0xc3d2e1f0;
}
/*
* pg_sha1_update
*
* Update a SHA1 context.
*/
void
pg_sha1_update(pg_sha1_ctx *ctx, const uint8 *data, size_t len)
{
	const uint8 *input;
	size_t		gaplen;
	size_t		gapstart;
	size_t		off;
	size_t		copysiz;

	input = data;
	off = 0;
	while (off < len)
	{
		/* Room remaining ("gap") in the current 64-byte block. */
		gapstart = COUNT % 64;
		gaplen = 64 - gapstart;

		copysiz = (gaplen < len - off) ? gaplen : len - off;
		memmove(&ctx->m.b8[gapstart], &input[off], copysiz);
		COUNT += copysiz;
		COUNT %= 64;
		/* Track total message length in bits for the final padding. */
		ctx->c.b64[0] += copysiz * 8;
		/* Hash each block as soon as it fills up. */
		if (COUNT % 64 == 0)
			sha1_step(ctx);
		off += copysiz;
	}
}
/*
* pg_sha1_final
*
* Finalize a SHA1 context.
*/
void
pg_sha1_final(pg_sha1_ctx *ctx, uint8 *dest)
{
	/* Apply the final padding, then copy the 20-byte digest into dest. */
	sha1_pad(ctx);
	sha1_result(dest, ctx);
}
@import "@sass/abstracts/vars";
@import "@sass/abstracts/mixins";
@import "@fontawesome/scss/mixins";
@import "@fontawesome/scss/variables";
// Sitemap variant of the navigation component: prefixes every second-level
// link with a vertically flipped Font Awesome "share" arrow.
.navigation.sitemap-navigation {
  .level2 a {
    position: relative;
    padding-left: 10px;

    // Icon drawn via a :before pseudo-element so no extra markup is needed.
    &:before {
      @include font-size(1.4);
      @include fa-icon();
      // translateY(-50%) centers on the link's midpoint; scale(1, -1)
      // mirrors the share arrow vertically so it points into the item.
      transform: translateY(-50%) scale(1, -1);
      position: absolute;
      content: $fa-var-share;
      top: 50%;
      left: -8px;
    }
  }
}
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
)
class SapoIE(InfoExtractor):
    """Extractor for SAPO Vídeos (videos.sapo.pt and national variants)."""

    IE_DESC = 'SAPO Vídeos'
    _VALID_URL = r'https?://(?:(?:v2|www)\.)?videos\.sapo\.(?:pt|cv|ao|mz|tl)/(?P<id>[\da-zA-Z]{20})'

    _TESTS = [
        {
            'url': 'http://videos.sapo.pt/UBz95kOtiWYUMTA5Ghfi',
            'md5': '79ee523f6ecb9233ac25075dee0eda83',
            'note': 'SD video',
            'info_dict': {
                'id': 'UBz95kOtiWYUMTA5Ghfi',
                'ext': 'mp4',
                'title': 'Benfica - Marcas na Hitória',
                'description': 'md5:c9082000a128c3fd57bf0299e1367f22',
                'duration': 264,
                'uploader': 'tiago_1988',
                'upload_date': '20080229',
                'categories': ['benfica', 'cabral', 'desporto', 'futebol', 'geovanni', 'hooijdonk', 'joao', 'karel', 'lisboa', 'miccoli'],
            },
        },
        {
            'url': 'http://videos.sapo.pt/IyusNAZ791ZdoCY5H5IF',
            'md5': '90a2f283cfb49193fe06e861613a72aa',
            'note': 'HD video',
            'info_dict': {
                'id': 'IyusNAZ791ZdoCY5H5IF',
                'ext': 'mp4',
                'title': 'Codebits VII - Report',
                'description': 'md5:6448d6fd81ce86feac05321f354dbdc8',
                'duration': 144,
                'uploader': 'codebits',
                'upload_date': '20140427',
                'categories': ['codebits', 'codebits2014'],
            },
        },
        {
            'url': 'http://v2.videos.sapo.pt/yLqjzPtbTimsn2wWBKHz',
            'md5': 'e5aa7cc0bdc6db9b33df1a48e49a15ac',
            'note': 'v2 video',
            'info_dict': {
                'id': 'yLqjzPtbTimsn2wWBKHz',
                'ext': 'mp4',
                'title': 'Hipnose Condicionativa 4',
                'description': 'md5:ef0481abf8fb4ae6f525088a6dadbc40',
                'duration': 692,
                'uploader': 'sapozen',
                'upload_date': '20090609',
                'categories': ['condicionativa', 'heloisa', 'hipnose', 'miranda', 'sapo', 'zen'],
            },
        },
    ]

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')

        # Metadata comes from the per-video RSS feed; everything of interest
        # lives in the first <item> of the channel.
        item = self._download_xml(
            'http://rd3.videos.sapo.pt/%s/rss2' % video_id, video_id).find('./channel/item')

        sapo_ns = '{http://videos.sapo.pt/mrss/}'

        def sapo_text(tag):
            # Text content of a SAPO-namespaced child element of <item>.
            return item.find('./%s%s' % (sapo_ns, tag)).text

        title = item.find('./title').text
        description = sapo_text('synopse')
        # Thumbnail uses the Yahoo MRSS namespace, unlike the other fields.
        thumbnail = item.find('./{http://search.yahoo.com/mrss/}content').get('url')
        duration = parse_duration(sapo_text('time'))
        uploader = sapo_text('author')
        upload_date = unified_strdate(item.find('./pubDate').text)
        view_count = int(sapo_text('views'))
        comment_count = int(sapo_text('comment_count'))

        tags = sapo_text('tags')
        categories = tags.split() if tags else []
        age_limit = 18 if sapo_text('m18') == 'true' else 0

        video_url = sapo_text('videoFile')
        # videoSize is "WIDTHxHEIGHT" for the SD rendition.
        width, height = sapo_text('videoSize').split('x')

        formats = [{
            'url': video_url,
            'ext': 'mp4',
            'format_id': 'sd',
            'width': int(width),
            'height': int(height),
        }]

        # The HD rendition, when flagged, lives at the same URL with the
        # trailing /mov/1 replaced by /mov/39.
        if sapo_text('HD') == 'true':
            formats.append({
                'url': re.sub(r'/mov/1$', '/mov/39', video_url),
                'ext': 'mp4',
                'format_id': 'hd',
                'width': 1280,
                'height': 720,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'uploader': uploader,
            'upload_date': upload_date,
            'view_count': view_count,
            'comment_count': comment_count,
            'categories': categories,
            'age_limit': age_limit,
            'formats': formats,
        }
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import time
import datetime
from lib.common.out import *
from lib.common.objects import File
from lib.core.database import Database
from lib.core.investigation import __project__
class Session(object):
    """A single analysis session: an opened file and/or a MISP event."""

    def __init__(self):
        # Sequential identifier assigned by the Sessions registry.
        self.id = None
        # File object for the file currently under analysis, if any.
        self.file = None
        # MISP event bound to this session, if any.
        self.misp_event = None
        # Human-readable timestamp of when the session was created.
        self.created_at = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
class Sessions(object):
    """Registry of analysis sessions, tracking which one is active."""

    def __init__(self):
        self.current = None
        self.sessions = []
        # Results of the most recent "find" command.
        self.find = None

    def close(self):
        """Detach from the currently opened session."""
        self.current = None

    def is_set(self):
        """Return True when a session is currently opened."""
        return bool(self.current)

    def switch(self, session):
        """Make the given session the active one."""
        self.current = session
        print_info("Switched to session #{0} on {1}".format(session.id, session.file.path))

    def new(self, path=None, misp_event=None):
        """Open a new session on a file path and/or a MISP event."""
        if path is None and misp_event is None:
            print_error("You have to open a session on a path or on a misp event.")
            return
        if not __project__.name:
            print_error("You must open an investigation to store files")
            return

        session = Session()
        session.id = len(self.sessions) + 1

        if path is not None:
            # Carry over any MISP event attached to the session we replace.
            if self.is_set() and self.current.misp_event:
                session.misp_event = self.current.misp_event

            session.file = File(path)
            # If the file is already known to the database, reuse its stored
            # name and tags.
            row = Database().find(key='sha256', value=session.file.sha256)
            if row:
                session.file.name = row[0].name
                session.file.tags = ', '.join(tag.to_dict()['tag'] for tag in row[0].tag)
            print_info("Session opened on {0}".format(path))

        if misp_event is not None:
            # Carry over any file attached to the session we replace.
            if self.is_set() and self.current.file:
                session.file = self.current.file
            refresh = (self.current is not None
                       and self.current.misp_event is not None
                       and self.current.misp_event.event_id == misp_event.event_id)
            session.misp_event = misp_event
            if refresh:
                print_info("Session on MISP event {0} refreshed.".format(misp_event.event_id))
            else:
                print_info("Session opened on MISP event {0}.".format(misp_event.event_id))

        if session.file is not None:
            # Drop any other session already open on the same file so the
            # registry holds at most one session per file.
            # NOTE(review): removal while iterating matches the original
            # code; it relies on there never being two matching entries.
            for entry in self.sessions:
                if entry.file is not None and entry.file.sha256 == session.file.sha256:
                    self.sessions.remove(entry)

        # Register and activate the new session.
        self.sessions.append(session)
        self.current = session
__sessions__ = Sessions() | unknown | codeparrot/codeparrot-clean | ||
"""Tests for the SmartThings config flow module."""
from unittest.mock import AsyncMock, Mock, patch
from uuid import uuid4
from aiohttp import ClientResponseError
from pysmartthings import APIResponseError
from pysmartthings.installedapp import format_install_url
from homeassistant import data_entry_flow
from homeassistant.components.smartthings import smartapp
from homeassistant.components.smartthings.const import (
CONF_APP_ID,
CONF_INSTALLED_APP_ID,
CONF_LOCATION_ID,
DOMAIN,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
HTTP_FORBIDDEN,
HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED,
)
from tests.common import MockConfigEntry
async def test_import_shows_user_step(hass):
    """Test import source shows the user form."""
    # Starting the flow from YAML import must land on the webhook
    # confirmation ("user") step instead of creating an entry directly.
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "import"}
    )
    assert flow_result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert flow_result["step_id"] == "user"
    expected_webhook = smartapp.get_webhook_url(hass)
    assert flow_result["description_placeholders"]["webhook_url"] == expected_webhook
async def test_entry_created(hass, app, app_oauth_client, location, smartthings_mock):
    """Test local webhook, new app, install event creates entry.

    Walks the full happy path: webhook confirmation -> PAT entry ->
    location selection -> external authorization -> install event ->
    config entry creation, then checks the stored entry data.
    """
    token = str(uuid4())
    installed_app_id = str(uuid4())
    refresh_token = str(uuid4())
    # No pre-existing apps, so the flow creates one with a new oauth client.
    smartthings_mock.apps.return_value = []
    smartthings_mock.create_app.return_value = (app, app_oauth_client)
    smartthings_mock.locations.return_value = [location]
    # Simulated SmartApp install request delivered to the webhook.
    request = Mock()
    request.installed_app_id = installed_app_id
    request.auth_token = token
    request.location_id = location.location_id
    request.refresh_token = refresh_token
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token and advance to location screen
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "select_location"
    # Select location and advance to external auth
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_LOCATION_ID: location.location_id}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert result["step_id"] == "authorize"
    assert result["url"] == format_install_url(app.app_id, location.location_id)
    # Complete external auth and advance to install
    await smartapp.smartapp_install(hass, request, None, app)
    # Finish
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"]["app_id"] == app.app_id
    assert result["data"]["installed_app_id"] == installed_app_id
    assert result["data"]["location_id"] == location.location_id
    assert result["data"]["access_token"] == token
    assert result["data"]["refresh_token"] == request.refresh_token
    assert result["data"][CONF_CLIENT_SECRET] == app_oauth_client.client_secret
    assert result["data"][CONF_CLIENT_ID] == app_oauth_client.client_id
    assert result["title"] == location.name
    entry = next(
        (entry for entry in hass.config_entries.async_entries(DOMAIN)),
        None,
    )
    assert entry.unique_id == smartapp.format_unique_id(
        app.app_id, location.location_id
    )
async def test_entry_created_from_update_event(
    hass, app, app_oauth_client, location, smartthings_mock
):
    """Test local webhook, new app, update event creates entry.

    Same flow as test_entry_created, but the external authorization is
    completed via a SmartApp *update* event instead of an install event.
    """
    token = str(uuid4())
    installed_app_id = str(uuid4())
    refresh_token = str(uuid4())
    smartthings_mock.apps.return_value = []
    smartthings_mock.create_app.return_value = (app, app_oauth_client)
    smartthings_mock.locations.return_value = [location]
    # Simulated SmartApp update request delivered to the webhook.
    request = Mock()
    request.installed_app_id = installed_app_id
    request.auth_token = token
    request.location_id = location.location_id
    request.refresh_token = refresh_token
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token and advance to location screen
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "select_location"
    # Select location and advance to external auth
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_LOCATION_ID: location.location_id}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert result["step_id"] == "authorize"
    assert result["url"] == format_install_url(app.app_id, location.location_id)
    # Complete external auth and advance to install
    await smartapp.smartapp_update(hass, request, None, app)
    # Finish
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"]["app_id"] == app.app_id
    assert result["data"]["installed_app_id"] == installed_app_id
    assert result["data"]["location_id"] == location.location_id
    assert result["data"]["access_token"] == token
    assert result["data"]["refresh_token"] == request.refresh_token
    assert result["data"][CONF_CLIENT_SECRET] == app_oauth_client.client_secret
    assert result["data"][CONF_CLIENT_ID] == app_oauth_client.client_id
    assert result["title"] == location.name
    entry = next(
        (entry for entry in hass.config_entries.async_entries(DOMAIN)),
        None,
    )
    assert entry.unique_id == smartapp.format_unique_id(
        app.app_id, location.location_id
    )
async def test_entry_created_existing_app_new_oauth_client(
    hass, app, app_oauth_client, location, smartthings_mock
):
    """Test entry is created with an existing app and generation of a new oauth client.

    The app already exists on the SmartThings account, so the flow should
    generate a new oauth client for it rather than creating a new app.
    """
    token = str(uuid4())
    installed_app_id = str(uuid4())
    refresh_token = str(uuid4())
    # The app exists already; only a fresh oauth client is generated.
    smartthings_mock.apps.return_value = [app]
    smartthings_mock.generate_app_oauth.return_value = app_oauth_client
    smartthings_mock.locations.return_value = [location]
    request = Mock()
    request.installed_app_id = installed_app_id
    request.auth_token = token
    request.location_id = location.location_id
    request.refresh_token = refresh_token
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token and advance to location screen
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "select_location"
    # Select location and advance to external auth
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_LOCATION_ID: location.location_id}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert result["step_id"] == "authorize"
    assert result["url"] == format_install_url(app.app_id, location.location_id)
    # Complete external auth and advance to install
    await smartapp.smartapp_install(hass, request, None, app)
    # Finish
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"]["app_id"] == app.app_id
    assert result["data"]["installed_app_id"] == installed_app_id
    assert result["data"]["location_id"] == location.location_id
    assert result["data"]["access_token"] == token
    assert result["data"]["refresh_token"] == request.refresh_token
    assert result["data"][CONF_CLIENT_SECRET] == app_oauth_client.client_secret
    assert result["data"][CONF_CLIENT_ID] == app_oauth_client.client_id
    assert result["title"] == location.name
    entry = next(
        (entry for entry in hass.config_entries.async_entries(DOMAIN)),
        None,
    )
    assert entry.unique_id == smartapp.format_unique_id(
        app.app_id, location.location_id
    )
async def test_entry_created_existing_app_copies_oauth_client(
    hass, app, location, smartthings_mock
):
    """Test entry is created with an existing app and copies the oauth client from another entry.

    When another config entry already exists for the same app, its oauth
    client credentials (and access token default) are reused for the new
    entry instead of generating fresh ones.
    """
    token = str(uuid4())
    installed_app_id = str(uuid4())
    refresh_token = str(uuid4())
    oauth_client_id = str(uuid4())
    oauth_client_secret = str(uuid4())
    smartthings_mock.apps.return_value = [app]
    smartthings_mock.locations.return_value = [location]
    request = Mock()
    request.installed_app_id = installed_app_id
    request.auth_token = token
    request.location_id = location.location_id
    request.refresh_token = refresh_token
    # Pre-existing entry for the same app holding the oauth client to copy.
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_APP_ID: app.app_id,
            CONF_CLIENT_ID: oauth_client_id,
            CONF_CLIENT_SECRET: oauth_client_secret,
            CONF_LOCATION_ID: str(uuid4()),
            CONF_INSTALLED_APP_ID: str(uuid4()),
            CONF_ACCESS_TOKEN: token,
        },
    )
    entry.add_to_hass(hass)
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Assert access token is defaulted to an existing entry for convenience.
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    # Enter token and advance to location screen
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "select_location"
    # Select location and advance to external auth
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_LOCATION_ID: location.location_id}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert result["step_id"] == "authorize"
    assert result["url"] == format_install_url(app.app_id, location.location_id)
    # Complete external auth and advance to install
    await smartapp.smartapp_install(hass, request, None, app)
    # Finish
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"]["app_id"] == app.app_id
    assert result["data"]["installed_app_id"] == installed_app_id
    assert result["data"]["location_id"] == location.location_id
    assert result["data"]["access_token"] == token
    assert result["data"]["refresh_token"] == request.refresh_token
    # Credentials must be copied from the pre-existing entry.
    assert result["data"][CONF_CLIENT_SECRET] == oauth_client_secret
    assert result["data"][CONF_CLIENT_ID] == oauth_client_id
    assert result["title"] == location.name
    entry = next(
        (
            entry
            for entry in hass.config_entries.async_entries(DOMAIN)
            if entry.data[CONF_INSTALLED_APP_ID] == installed_app_id
        ),
        None,
    )
    assert entry.unique_id == smartapp.format_unique_id(
        app.app_id, location.location_id
    )
async def test_entry_created_with_cloudhook(
    hass, app, app_oauth_client, location, smartthings_mock
):
    """Test cloud, new app, install event creates entry.

    With an active cloud subscription the integration should create a
    cloudhook and use it as the webhook URL, then complete the same
    happy-path flow as the local-webhook case.
    """
    hass.config.components.add("cloud")
    # Unload the endpoint so we can reload it under the cloud.
    await smartapp.unload_smartapp_endpoint(hass)
    token = str(uuid4())
    installed_app_id = str(uuid4())
    refresh_token = str(uuid4())
    smartthings_mock.apps.return_value = []
    smartthings_mock.create_app = AsyncMock(return_value=(app, app_oauth_client))
    smartthings_mock.locations = AsyncMock(return_value=[location])
    request = Mock()
    request.installed_app_id = installed_app_id
    request.auth_token = token
    request.location_id = location.location_id
    request.refresh_token = refresh_token
    # Patch the cloud helpers so a cloudhook is "created" without a real
    # cloud connection.
    with patch.object(
        hass.components.cloud, "async_active_subscription", Mock(return_value=True)
    ), patch.object(
        hass.components.cloud,
        "async_create_cloudhook",
        AsyncMock(return_value="http://cloud.test"),
    ) as mock_create_cloudhook:
        await smartapp.setup_smartapp_endpoint(hass)
        # Webhook confirmation shown
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "user"
        assert result["description_placeholders"][
            "webhook_url"
        ] == smartapp.get_webhook_url(hass)
        assert mock_create_cloudhook.call_count == 1
        # Advance to PAT screen
        result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "pat"
        assert "token_url" in result["description_placeholders"]
        assert "component_url" in result["description_placeholders"]
        # Enter token and advance to location screen
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: token}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "select_location"
        # Select location and advance to external auth
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_LOCATION_ID: location.location_id}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
        assert result["step_id"] == "authorize"
        assert result["url"] == format_install_url(app.app_id, location.location_id)
        # Complete external auth and advance to install
        await smartapp.smartapp_install(hass, request, None, app)
        # Finish
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["data"]["app_id"] == app.app_id
        assert result["data"]["installed_app_id"] == installed_app_id
        assert result["data"]["location_id"] == location.location_id
        assert result["data"]["access_token"] == token
        assert result["data"]["refresh_token"] == request.refresh_token
        assert result["data"][CONF_CLIENT_SECRET] == app_oauth_client.client_secret
        assert result["data"][CONF_CLIENT_ID] == app_oauth_client.client_id
        assert result["title"] == location.name
        entry = next(
            (entry for entry in hass.config_entries.async_entries(DOMAIN)),
            None,
        )
        assert entry.unique_id == smartapp.format_unique_id(
            app.app_id, location.location_id
        )
async def test_invalid_webhook_aborts(hass):
    """Test flow aborts if webhook is invalid."""
    # Configure an external URL the integration rejects as a webhook
    # target, then start the flow and expect an immediate abort.
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.local:8123"},
    )
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert flow_result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow_result["reason"] == "invalid_webhook_url"
    placeholders = flow_result["description_placeholders"]
    assert placeholders["webhook_url"] == smartapp.get_webhook_url(hass)
    assert "component_url" in placeholders
async def test_invalid_token_shows_error(hass):
    """Test an error is shown for invalid token formats.

    A PAT that is not a UUID should keep the flow on the "pat" step with a
    token_invalid_format field error and the entered value preserved.
    """
    token = "123456789"
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    # The invalid token is kept as the schema default so it can be edited.
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {CONF_ACCESS_TOKEN: "token_invalid_format"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_unauthorized_token_shows_error(hass, smartthings_mock):
    """Test an error is shown for unauthorized token formats.

    An HTTP 401 from the SmartThings API maps to a token_unauthorized
    field error on the "pat" step.
    """
    token = str(uuid4())
    request_info = Mock(real_url="http://example.com")
    smartthings_mock.apps.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_UNAUTHORIZED
    )
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {CONF_ACCESS_TOKEN: "token_unauthorized"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_forbidden_token_shows_error(hass, smartthings_mock):
    """Test an error is shown for forbidden token formats.

    An HTTP 403 from the SmartThings API maps to a token_forbidden field
    error on the "pat" step.
    """
    token = str(uuid4())
    request_info = Mock(real_url="http://example.com")
    smartthings_mock.apps.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_FORBIDDEN
    )
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {CONF_ACCESS_TOKEN: "token_forbidden"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_webhook_problem_shows_error(hass, smartthings_mock):
    """Test an error is shown when there's an problem with the webhook endpoint.

    A 422 APIResponseError whose is_target_error reports a webhook target
    problem maps to a base "webhook_error" on the "pat" step.
    """
    token = str(uuid4())
    data = {"error": {}}
    request_info = Mock(real_url="http://example.com")
    error = APIResponseError(
        request_info=request_info, history=None, data=data, status=422
    )
    # Force the error to be classified as a webhook-target problem.
    error.is_target_error = Mock(return_value=True)
    smartthings_mock.apps.side_effect = error
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {"base": "webhook_error"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_api_error_shows_error(hass, smartthings_mock):
    """Test an error is shown when other API errors occur.

    A generic 400 APIResponseError maps to a base "app_setup_error" on the
    "pat" step.
    """
    token = str(uuid4())
    data = {"error": {}}
    request_info = Mock(real_url="http://example.com")
    error = APIResponseError(
        request_info=request_info, history=None, data=data, status=400
    )
    smartthings_mock.apps.side_effect = error
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {"base": "app_setup_error"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_unknown_response_error_shows_error(hass, smartthings_mock):
    """Test an error is shown when there is an unknown API error.

    An unhandled HTTP status (404 here) maps to a base "app_setup_error"
    on the "pat" step.
    """
    token = str(uuid4())
    request_info = Mock(real_url="http://example.com")
    error = ClientResponseError(
        request_info=request_info, history=None, status=HTTP_NOT_FOUND
    )
    smartthings_mock.apps.side_effect = error
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {"base": "app_setup_error"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_unknown_error_shows_error(hass, smartthings_mock):
    """Test an error is shown when there is an unknown API error."""
    token = str(uuid4())
    # Any non-ClientResponseError exception from the API should surface as
    # the generic app_setup_error on the PAT step.
    smartthings_mock.apps.side_effect = Exception("Unknown error")
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    # The flow stays on the PAT step and reports the setup error.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {"base": "app_setup_error"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_no_available_locations_aborts(
    hass, app, app_oauth_client, location, smartthings_mock
):
    """Test select location aborts if no available locations."""
    token = str(uuid4())
    smartthings_mock.apps.return_value = []
    smartthings_mock.create_app.return_value = (app, app_oauth_client)
    smartthings_mock.locations.return_value = [location]
    # A pre-existing config entry already claims the only location, so no
    # locations remain selectable for a new flow.
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_LOCATION_ID: location.location_id}
    )
    entry.add_to_hass(hass)
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token and advance to location screen
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "no_available_locations" | unknown | codeparrot/codeparrot-clean | ||
from __future__ import print_function
import sys
import time
import traceback
import warnings
from functools import wraps
from six import iteritems
from . import formats
from . import levels
from . import outputs
from .lib import iso8601time
from .message import Message
def emit(level):
    """Decorator factory: call the wrapped method, then emit at ``level``.

    The wrapped method must return a `.Logger` instance; after the call,
    that logger's ``_emit`` is invoked with an empty format spec.

    For convenience, per-level variants are available as ``emit.debug``,
    ``emit.info``, etc.
    """
    def decorator(method):
        @wraps(method)
        def emitting(self, *args, **kwargs):
            bound_logger = method(self, *args, **kwargs)
            bound_logger._emit(level, '', [], {})
        return emitting
    return decorator
# Ready-made per-level decorators: ``@emit.info`` etc. emit at that level
# after the decorated method returns its logger.
emit.debug = emit(levels.DEBUG)
emit.info = emit(levels.INFO)
emit.notice = emit(levels.NOTICE)
emit.warning = emit(levels.WARNING)
emit.error = emit(levels.ERROR)
emit.critical = emit(levels.CRITICAL)
class BaseLogger(object):
    """Base class for loggers.

    Holds bound structured-logging ``fields``, message-creation ``options``
    and a minimum level. Subclasses implement `_emit` to deliver messages.
    """

    __slots__ = ['_fields', '_options', 'min_level']

    # options() only accepts option names that Message itself understands
    __valid_options = set(Message._default_options)

    def __init__(self, fields=None, options=None, min_level=None):
        """Constructor for internal module use only, basically.

        ``fields`` and ``options`` will be copied.
        """
        self._fields = fields.copy() if fields is not None else {}
        self._options = options.copy() if options is not None else Message._default_options.copy()
        self.min_level = min_level if min_level is not None else levels.DEBUG

    def _clone(self):
        # copy constructor - fields/options are re-copied by __init__
        return self.__class__(fields=self._fields, options=self._options, min_level=self.min_level)

    def _emit(self, level, format_spec, args, kwargs):
        """deliver a message - subclasses must override"""
        raise NotImplementedError

    #
    # The Magic
    #
    def fields(self, **kwargs):
        """bind fields for structured logging"""
        return self.fields_dict(kwargs)

    def fields_dict(self, d):
        """bind fields for structured logging.

        Use this instead of `.fields` if you have keys which are not valid Python identifiers.
        """
        clone = self._clone()
        clone._fields.update(d)
        return clone

    def options(self, **kwargs):
        """bind options for message creation.

        :raises ValueError: if an option name is not recognized by Message
        """
        bad_options = set(kwargs) - self.__valid_options
        if bad_options:
            raise ValueError("Invalid options {0!r}".format(tuple(bad_options)))
        clone = self._clone()
        clone._options.update(kwargs)
        return clone

    #
    # Convenience
    #
    def trace(self, trace='error'):
        """convenience method to enable traceback logging"""
        return self.options(trace=trace)

    def name(self, name):
        """convenience method to bind the ``name`` field"""
        return self.fields(name=name)

    #
    # Do something
    #
    def debug(self, format_spec='', *args, **kwargs):
        """Emit at ``DEBUG`` level"""
        self._emit(levels.DEBUG, format_spec, args, kwargs)

    def info(self, format_spec='', *args, **kwargs):
        """Emit at ``INFO`` level"""
        self._emit(levels.INFO, format_spec, args, kwargs)

    def notice(self, format_spec='', *args, **kwargs):
        """Emit at ``NOTICE`` level"""
        # Bug fix: a stray ``return True`` made notice() the only level
        # helper with a return value; all level helpers now return None.
        self._emit(levels.NOTICE, format_spec, args, kwargs)

    def warning(self, format_spec='', *args, **kwargs):
        """Emit at ``WARNING`` level"""
        self._emit(levels.WARNING, format_spec, args, kwargs)

    def error(self, format_spec='', *args, **kwargs):
        """Emit at ``ERROR`` level"""
        self._emit(levels.ERROR, format_spec, args, kwargs)

    def critical(self, format_spec='', *args, **kwargs):
        """Emit at ``CRITICAL`` level"""
        self._emit(levels.CRITICAL, format_spec, args, kwargs)
class InternalLogger(BaseLogger):
    """Special-purpose logger for internal uses.

    Sends messages directly to its single output, bypassing :data:`.emitters`.

    :ivar `Output` output: an output to write to
    """

    __slots__ = ['output']

    def __init__(self, output, fields=None, options=None, min_level=None):
        super(InternalLogger, self).__init__(fields, options, min_level)
        self.output = output

    def _clone(self):
        return self.__class__(output=self.output, fields=self._fields,
                              options=self._options, min_level=self.min_level)

    def _emit(self, level, format_spec, args, kwargs):
        """does work of emitting - for internal use"""
        if level < self.min_level:
            return
        # msg stays None if Message construction itself blows up, so the
        # error report below prints repr(None) instead of a partial message.
        msg = None
        try:
            msg = Message(level, format_spec, self._fields.copy(),
                          self._options.copy(), args, kwargs)
            self.output.output(msg)
        except Exception:
            # Last-ditch reporting: the internal logger has no fallback.
            print(iso8601time(), "Error in twiggy internal log! Something is seriously broken.",
                  file=sys.stderr)
            print("Offending message:", repr(msg), file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
class Logger(BaseLogger):
    """Logger for end-users.

    Routes messages through the module-level :data:`.emitters` dict, applying
    the logger-level ``filter`` and each emitter's own level/filter.
    """
    __slots__ = ['_emitters', 'filter']
    def _feature_noop(self, *args, **kwargs):
        # placeholder installed by disableFeature(): keeps the name callable
        return self._clone()
    @classmethod
    def addFeature(cls, func, name=None):
        """add a feature to the class
        :arg func: the function to add
        :arg string name: the name to add it under. If None, use the function's name.
        """
        warnings.warn("Use of features is currently discouraged, pending refactoring",
                      RuntimeWarning)
        name = name if name is not None else func.__name__
        setattr(cls, name, func)
    @classmethod
    def disableFeature(cls, name):
        """disable a feature.
        A method will still exist by this name, but it won't do anything.
        :arg string name: the name of the feature to disable.
        """
        warnings.warn("Use of features is currently discouraged, pending refactoring",
                      RuntimeWarning)
        # get func directly from class dict - we don't want an unbound method.
        setattr(cls, name, cls.__dict__['_feature_noop'])
    @classmethod
    def delFeature(cls, name):
        """delete a feature entirely
        :arg string name: the name of the feature to remove
        """
        warnings.warn("Use of features is currently discouraged, pending refactoring",
                      RuntimeWarning)
        delattr(cls, name)
    def __init__(self, fields=None, options=None, emitters=None,
                 min_level=None, filter=None):
        super(Logger, self).__init__(fields, options, min_level)
        #: a dict of emitters
        self._emitters = emitters if emitters is not None else {}
        # default filter accepts everything
        self.filter = filter if filter is not None else lambda format_spec: True
    def _clone(self):
        """return a new Logger instance with copied attributes
        Probably only for internal use.
        """
        return self.__class__(fields=self._fields, options=self._options,
                              emitters=self._emitters, min_level=self.min_level,
                              filter=self.filter)
    @emit.info
    def struct(self, **kwargs):
        """convenience method for structured logging.
        Calls fields() and emits at INFO
        """
        return self.fields(**kwargs)
    @emit.info
    def struct_dict(self, d):
        """convenience method for structured logging.
        Use instead of struct() if you have keys which are not valid Python identifiers
        """
        return self.fields_dict(d)
    #
    # Boring stuff
    #
    def _emit(self, level, format_spec, args, kwargs):
        """does the work of emitting - for internal use"""
        # XXX should these traps be collapsed?
        if level < self.min_level:
            return
        # step 1: logger-level filter on the raw format spec
        try:
            if not self.filter(format_spec):
                return
        except Exception:
            internal_log.info("Error in Logger filtering with {0} on {1}",
                              repr(self.filter), format_spec)
            # just continue emitting in face of filter error
        # step 2: select emitters whose min_level admits this message
        # XXX should we trap here too b/c of "Dictionary changed size during iteration" (or
        # other rare errors?)
        potential_emitters = [(name, emitter) for name, emitter in iteritems(self._emitters)
                              if level >= emitter.min_level]
        if not potential_emitters:
            return
        # step 3: build the Message only once emitters are known to want it
        try:
            msg = Message(level, format_spec, self._fields.copy(), self._options.copy(),
                          args, kwargs)
        except Exception:
            # XXX use .fields() instead?
            internal_log.info("Error formatting message level: {0!r}, format: {1!r},"
                              " fields: {2!r}, options: {3!r}, args: {4!r}, kwargs: {5!r}",
                              level, format_spec, self._fields, self._options, args, kwargs)
            return
        # step 4: per-emitter filters; collect unique outputs so each output
        # receives the message at most once
        outputs = set()
        # sort to make things deterministic (for tests, mainly)
        for name, emitter in sorted(potential_emitters):
            try:
                include = emitter.filter(msg)
            except Exception:
                internal_log.info("Error filtering with emitter {0}. Filter: {1}"
                                  " Message: {2!r}", name, repr(emitter.filter), msg)
                include = True  # output anyway if error
            if include:
                outputs.add(emitter._output)
        # step 5: deliver; a failing output never prevents the others
        for o in outputs:
            try:
                o.output(msg)
            except Exception:
                internal_log.warning("Error outputting with {0!r}. Message: {1!r}", o, msg)
# Module-internal plumbing for twiggy's self-logging: a line-formatted stream
# output on stderr; each message is stamped with the current UTC time.
__fields = {'time': time.gmtime}
__internal_format = formats.LineFormat(conversion=formats.line_conversion)
__internal_output = outputs.StreamOutput(format=__internal_format, stream=sys.stderr)
internal_log = InternalLogger(fields=__fields, output=__internal_output
).name('twiggy.internal').trace('error') | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestMailGroup(TestMail):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_00_mail_group_access_rights(self):
""" Testing mail_group access rights and basic mail_thread features """
cr, uid, user_noone_id, user_employee_id = self.cr, self.uid, self.user_noone_id, self.user_employee_id
# Do: Bert reads Jobs -> ok, public
self.mail_group.read(cr, user_noone_id, [self.group_jobs_id])
# Do: Bert read Pigs -> ko, restricted to employees
with self.assertRaises(except_orm):
self.mail_group.read(cr, user_noone_id, [self.group_pigs_id])
# Do: Raoul read Pigs -> ok, belong to employees
self.mail_group.read(cr, user_employee_id, [self.group_pigs_id])
# Do: Bert creates a group -> ko, no access rights
with self.assertRaises(AccessError):
self.mail_group.create(cr, user_noone_id, {'name': 'Test'})
# Do: Raoul creates a restricted group -> ok
new_group_id = self.mail_group.create(cr, user_employee_id, {'name': 'Test'})
# Do: Bert added in followers, read -> ok, in followers
self.mail_group.message_subscribe_users(cr, uid, [new_group_id], [user_noone_id])
self.mail_group.read(cr, user_noone_id, [new_group_id])
# Do: Raoul reads Priv -> ko, private
with self.assertRaises(except_orm):
self.mail_group.read(cr, user_employee_id, [self.group_priv_id])
# Do: Raoul added in follower, read -> ok, in followers
self.mail_group.message_subscribe_users(cr, uid, [self.group_priv_id], [user_employee_id])
self.mail_group.read(cr, user_employee_id, [self.group_priv_id])
# Do: Raoul write on Jobs -> ok
self.mail_group.write(cr, user_employee_id, [self.group_priv_id], {'name': 'modified'})
# Do: Bert cannot write on Private -> ko (read but no write)
with self.assertRaises(AccessError):
self.mail_group.write(cr, user_noone_id, [self.group_priv_id], {'name': 're-modified'})
# Test: Bert cannot unlink the group
with self.assertRaises(except_orm):
self.mail_group.unlink(cr, user_noone_id, [self.group_priv_id])
# Do: Raoul unlinks the group, there are no followers and messages left
self.mail_group.unlink(cr, user_employee_id, [self.group_priv_id])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
self.assertFalse(fol_ids, 'unlinked document should not have any followers left')
msg_ids = self.mail_message.search(cr, uid, [('model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
self.assertFalse(msg_ids, 'unlinked document should not have any followers left') | unknown | codeparrot/codeparrot-clean | ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\Finder;
use Symfony\Component\Finder\Comparator\DateComparator;
use Symfony\Component\Finder\Comparator\NumberComparator;
use Symfony\Component\Finder\Exception\DirectoryNotFoundException;
use Symfony\Component\Finder\Iterator\CustomFilterIterator;
use Symfony\Component\Finder\Iterator\DateRangeFilterIterator;
use Symfony\Component\Finder\Iterator\DepthRangeFilterIterator;
use Symfony\Component\Finder\Iterator\ExcludeDirectoryFilterIterator;
use Symfony\Component\Finder\Iterator\FilecontentFilterIterator;
use Symfony\Component\Finder\Iterator\FilenameFilterIterator;
use Symfony\Component\Finder\Iterator\LazyIterator;
use Symfony\Component\Finder\Iterator\SizeRangeFilterIterator;
use Symfony\Component\Finder\Iterator\SortableIterator;
/**
* Finder allows to build rules to find files and directories.
*
* It is a thin wrapper around several specialized iterator classes.
*
* All rules may be invoked several times.
*
* All methods return the current Finder object to allow chaining:
*
* $finder = Finder::create()->files()->name('*.php')->in(__DIR__);
*
* @author Fabien Potencier <fabien@symfony.com>
*
* @implements \IteratorAggregate<non-empty-string, SplFileInfo>
*/
class Finder implements \IteratorAggregate, \Countable
{
    // Bit flags combined into $ignore (see ignoreVCS()/ignoreDotFiles()/ignoreVCSIgnored()).
    public const IGNORE_VCS_FILES = 1;
    public const IGNORE_DOT_FILES = 2;
    public const IGNORE_VCS_IGNORED_FILES = 4;
    // 0 means "match both files and directories"; see files()/directories().
    private int $mode = 0;
    private array $names = [];
    private array $notNames = [];
    private array $exclude = [];
    private array $filters = [];
    private array $pruneFilters = [];
    private array $depths = [];
    private array $sizes = [];
    private bool $followLinks = false;
    private bool $unixPaths = false;
    private bool $reverseSorting = false;
    // false = no sorting; int = one of SortableIterator::SORT_BY_*; Closure = custom comparator.
    private \Closure|int|false $sort = false;
    private int $ignore = 0;
    /** @var list<string> */
    private array $dirs = [];
    private array $dates = [];
    /** @var list<iterable<SplFileInfo|\SplFileInfo|string>> */
    private array $iterators = [];
    private array $contains = [];
    private array $notContains = [];
    private array $paths = [];
    private array $notPaths = [];
    private bool $ignoreUnreadableDirs = false;
    private static array $vcsPatterns = ['.svn', '_svn', 'CVS', '_darcs', '.arch-params', '.monotone', '.bzr', '.git', '.hg'];
    public function __construct()
    {
        // Ignore VCS metadata and dot-files by default; both defaults can be
        // toggled via ignoreVCS() / ignoreDotFiles().
        $this->ignore = static::IGNORE_VCS_FILES | static::IGNORE_DOT_FILES;
    }
    /**
     * Creates a new Finder.
     */
    public static function create(): static
    {
        return new static();
    }
    /**
     * Restricts the matching to directories only.
     *
     * @return $this
     */
    public function directories(): static
    {
        // Mutually exclusive with files(): the last call wins.
        $this->mode = Iterator\FileTypeFilterIterator::ONLY_DIRECTORIES;
        return $this;
    }
    /**
     * Restricts the matching to files only.
     *
     * @return $this
     */
    public function files(): static
    {
        // Mutually exclusive with directories(): the last call wins.
        $this->mode = Iterator\FileTypeFilterIterator::ONLY_FILES;
        return $this;
    }
/**
* Adds tests for the directory depth.
*
* Usage:
*
* $finder->depth('> 1') // the Finder will start matching at level 1.
* $finder->depth('< 3') // the Finder will descend at most 3 levels of directories below the starting point.
* $finder->depth(['>= 1', '< 3'])
*
* @param string|int|string[]|int[] $levels The depth level expression or an array of depth levels
*
* @return $this
*
* @see DepthRangeFilterIterator
* @see NumberComparator
*/
public function depth(string|int|array $levels): static
{
foreach ((array) $levels as $level) {
$this->depths[] = new NumberComparator($level);
}
return $this;
}
/**
* Adds tests for file dates (last modified).
*
* The date must be something that strtotime() is able to parse:
*
* $finder->date('since yesterday');
* $finder->date('until 2 days ago');
* $finder->date('> now - 2 hours');
* $finder->date('>= 2005-10-15');
* $finder->date(['>= 2005-10-15', '<= 2006-05-27']);
*
* @param string|string[] $dates A date range string or an array of date ranges
*
* @return $this
*
* @see strtotime
* @see DateRangeFilterIterator
* @see DateComparator
*/
public function date(string|array $dates): static
{
foreach ((array) $dates as $date) {
$this->dates[] = new DateComparator($date);
}
return $this;
}
/**
* Adds rules that files must match.
*
* You can use patterns (delimited with / sign), globs or simple strings.
*
* $finder->name('/\.php$/')
* $finder->name('*.php') // same as above, without dot files
* $finder->name('test.php')
* $finder->name(['test.py', 'test.php'])
*
* @param string|string[] $patterns A pattern (a regexp, a glob, or a string) or an array of patterns
*
* @return $this
*
* @see FilenameFilterIterator
*/
public function name(string|array $patterns): static
{
$this->names = array_merge($this->names, (array) $patterns);
return $this;
}
/**
* Adds rules that files must not match.
*
* @param string|string[] $patterns A pattern (a regexp, a glob, or a string) or an array of patterns
*
* @return $this
*
* @see FilenameFilterIterator
*/
public function notName(string|array $patterns): static
{
$this->notNames = array_merge($this->notNames, (array) $patterns);
return $this;
}
/**
* Adds tests that file contents must match.
*
* Strings or PCRE patterns can be used:
*
* $finder->contains('Lorem ipsum')
* $finder->contains('/Lorem ipsum/i')
* $finder->contains(['dolor', '/ipsum/i'])
*
* @param string|string[] $patterns A pattern (string or regexp) or an array of patterns
*
* @return $this
*
* @see FilecontentFilterIterator
*/
public function contains(string|array $patterns): static
{
$this->contains = array_merge($this->contains, (array) $patterns);
return $this;
}
/**
* Adds tests that file contents must not match.
*
* Strings or PCRE patterns can be used:
*
* $finder->notContains('Lorem ipsum')
* $finder->notContains('/Lorem ipsum/i')
* $finder->notContains(['lorem', '/dolor/i'])
*
* @param string|string[] $patterns A pattern (string or regexp) or an array of patterns
*
* @return $this
*
* @see FilecontentFilterIterator
*/
public function notContains(string|array $patterns): static
{
$this->notContains = array_merge($this->notContains, (array) $patterns);
return $this;
}
/**
* Adds rules that filenames must match.
*
* You can use patterns (delimited with / sign) or simple strings.
*
* $finder->path('some/special/dir')
* $finder->path('/some\/special\/dir/') // same as above
* $finder->path(['some dir', 'another/dir'])
*
* Use only / as dirname separator.
*
* @param string|string[] $patterns A pattern (a regexp or a string) or an array of patterns
*
* @return $this
*
* @see FilenameFilterIterator
*/
public function path(string|array $patterns): static
{
$this->paths = array_merge($this->paths, (array) $patterns);
return $this;
}
/**
* Adds rules that filenames must not match.
*
* You can use patterns (delimited with / sign) or simple strings.
*
* $finder->notPath('some/special/dir')
* $finder->notPath('/some\/special\/dir/') // same as above
* $finder->notPath(['some/file.txt', 'another/file.log'])
*
* Use only / as dirname separator.
*
* @param string|string[] $patterns A pattern (a regexp or a string) or an array of patterns
*
* @return $this
*
* @see FilenameFilterIterator
*/
public function notPath(string|array $patterns): static
{
$this->notPaths = array_merge($this->notPaths, (array) $patterns);
return $this;
}
/**
* Adds tests for file sizes.
*
* $finder->size('> 10K');
* $finder->size('<= 1Ki');
* $finder->size(4);
* $finder->size(['> 10K', '< 20K'])
*
* @param string|int|string[]|int[] $sizes A size range string or an integer or an array of size ranges
*
* @return $this
*
* @see SizeRangeFilterIterator
* @see NumberComparator
*/
public function size(string|int|array $sizes): static
{
foreach ((array) $sizes as $size) {
$this->sizes[] = new NumberComparator($size);
}
return $this;
}
/**
* Excludes directories.
*
* Directories passed as argument must be relative to the ones defined with the `in()` method. For example:
*
* $finder->in(__DIR__)->exclude('ruby');
*
* @param string|array $dirs A directory path or an array of directories
*
* @return $this
*
* @see ExcludeDirectoryFilterIterator
*/
public function exclude(string|array $dirs): static
{
$this->exclude = array_merge($this->exclude, (array) $dirs);
return $this;
}
/**
* Excludes "hidden" directories and files (starting with a dot).
*
* This option is enabled by default.
*
* @return $this
*
* @see ExcludeDirectoryFilterIterator
*/
public function ignoreDotFiles(bool $ignoreDotFiles): static
{
if ($ignoreDotFiles) {
$this->ignore |= static::IGNORE_DOT_FILES;
} else {
$this->ignore &= ~static::IGNORE_DOT_FILES;
}
return $this;
}
/**
* Forces the finder to ignore version control directories.
*
* This option is enabled by default.
*
* @return $this
*
* @see ExcludeDirectoryFilterIterator
*/
public function ignoreVCS(bool $ignoreVCS): static
{
if ($ignoreVCS) {
$this->ignore |= static::IGNORE_VCS_FILES;
} else {
$this->ignore &= ~static::IGNORE_VCS_FILES;
}
return $this;
}
/**
* Forces Finder to obey .gitignore and ignore files based on rules listed there.
*
* This option is disabled by default.
*
* @return $this
*/
public function ignoreVCSIgnored(bool $ignoreVCSIgnored): static
{
if ($ignoreVCSIgnored) {
$this->ignore |= static::IGNORE_VCS_IGNORED_FILES;
} else {
$this->ignore &= ~static::IGNORE_VCS_IGNORED_FILES;
}
return $this;
}
/**
* Adds VCS patterns.
*
* @see ignoreVCS()
*
* @param string|string[] $pattern VCS patterns to ignore
*/
public static function addVCSPattern(string|array $pattern): void
{
foreach ((array) $pattern as $p) {
self::$vcsPatterns[] = $p;
}
self::$vcsPatterns = array_unique(self::$vcsPatterns);
}
/**
* Sorts files and directories by an anonymous function.
*
* The anonymous function receives two \SplFileInfo instances to compare.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sort(\Closure $closure): static
{
$this->sort = $closure;
return $this;
}
/**
* Sorts files and directories by extension.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sortByExtension(): static
{
$this->sort = SortableIterator::SORT_BY_EXTENSION;
return $this;
}
/**
* Sorts files and directories by name.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sortByName(bool $useNaturalSort = false): static
{
$this->sort = $useNaturalSort ? SortableIterator::SORT_BY_NAME_NATURAL : SortableIterator::SORT_BY_NAME;
return $this;
}
/**
* Sorts files and directories by name case insensitive.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sortByCaseInsensitiveName(bool $useNaturalSort = false): static
{
$this->sort = $useNaturalSort ? SortableIterator::SORT_BY_NAME_NATURAL_CASE_INSENSITIVE : SortableIterator::SORT_BY_NAME_CASE_INSENSITIVE;
return $this;
}
/**
* Sorts files and directories by size.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sortBySize(): static
{
$this->sort = SortableIterator::SORT_BY_SIZE;
return $this;
}
/**
* Sorts files and directories by type (directories before files), then by name.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sortByType(): static
{
$this->sort = SortableIterator::SORT_BY_TYPE;
return $this;
}
/**
* Sorts files and directories by the last accessed time.
*
* This is the time that the file was last accessed, read or written to.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sortByAccessedTime(): static
{
$this->sort = SortableIterator::SORT_BY_ACCESSED_TIME;
return $this;
}
/**
* Reverses the sorting.
*
* @return $this
*/
public function reverseSorting(): static
{
$this->reverseSorting = true;
return $this;
}
/**
* Sorts files and directories by the last inode changed time.
*
* This is the time that the inode information was last modified (permissions, owner, group or other metadata).
*
* On Windows, since inode is not available, changed time is actually the file creation time.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sortByChangedTime(): static
{
$this->sort = SortableIterator::SORT_BY_CHANGED_TIME;
return $this;
}
/**
* Sorts files and directories by the last modified time.
*
* This is the last time the actual contents of the file were last modified.
*
* This can be slow as all the matching files and directories must be retrieved for comparison.
*
* @return $this
*
* @see SortableIterator
*/
public function sortByModifiedTime(): static
{
$this->sort = SortableIterator::SORT_BY_MODIFIED_TIME;
return $this;
}
/**
* Filters the iterator with an anonymous function.
*
* The anonymous function receives a \SplFileInfo and must return false
* to remove files.
*
* @param \Closure(SplFileInfo): bool $closure
* @param bool $prune Whether to skip traversing directories further
*
* @return $this
*
* @see CustomFilterIterator
*/
public function filter(\Closure $closure, bool $prune = false): static
{
$this->filters[] = $closure;
if ($prune) {
$this->pruneFilters[] = $closure;
}
return $this;
}
/**
* Forces the following of symlinks.
*
* @return $this
*/
public function followLinks(): static
{
$this->followLinks = true;
return $this;
}
/**
* Force the use of UNIX paths when recursing directories.
*
* @return $this
*/
public function useUnixPaths(): static
{
$this->unixPaths = true;
return $this;
}
/**
* Tells finder to ignore unreadable directories.
*
* By default, scanning unreadable directories content throws an AccessDeniedException.
*
* @return $this
*/
public function ignoreUnreadableDirs(bool $ignore = true): static
{
$this->ignoreUnreadableDirs = $ignore;
return $this;
}
/**
* Searches files and directories which match defined rules.
*
* @param string|string[] $dirs A directory path or an array of directories
*
* @return $this
*
* @throws DirectoryNotFoundException if one of the directories does not exist
*/
public function in(string|array $dirs): static
{
$resolvedDirs = [];
foreach ((array) $dirs as $dir) {
if (is_dir($dir)) {
$resolvedDirs[] = [$this->normalizeDir($dir)];
} elseif ($glob = glob($dir, (\defined('GLOB_BRACE') ? \GLOB_BRACE : 0) | \GLOB_ONLYDIR | \GLOB_NOSORT)) {
sort($glob);
$resolvedDirs[] = array_map($this->normalizeDir(...), $glob);
} else {
throw new DirectoryNotFoundException(\sprintf('The "%s" directory does not exist.', $dir));
}
}
$this->dirs = array_merge($this->dirs, ...$resolvedDirs);
return $this;
}
    /**
     * Returns an Iterator for the current Finder configuration.
     *
     * This method implements the IteratorAggregate interface.
     *
     * @return \Iterator<non-empty-string, SplFileInfo>
     *
     * @throws \LogicException if the in() method has not been called
     */
    public function getIterator(): \Iterator
    {
        if (!$this->dirs && !$this->iterators) {
            throw new \LogicException('You must call one of in() or append() methods before iterating over a Finder.');
        }
        // Fast path: a single directory and nothing appended.
        if (1 === \count($this->dirs) && !$this->iterators) {
            $iterator = $this->searchInDirectory($this->dirs[0]);
        } else {
            $iterator = new \AppendIterator();
            foreach ($this->dirs as $dir) {
                // Lazy so each directory is only scanned once iteration reaches it.
                $iterator->append(new \IteratorIterator(new LazyIterator(fn () => $this->searchInDirectory($dir))));
            }
            foreach ($this->iterators as $it) {
                $iterator->append(new \IteratorIterator(new LazyIterator(static function () use ($it) {
                    foreach ($it as $file) {
                        if (!$file instanceof \SplFileInfo) {
                            $file = new \SplFileInfo($file);
                        }
                        $key = $file->getPathname();
                        // Wrap native \SplFileInfo in the Finder's own SplFileInfo subclass.
                        if (!$file instanceof SplFileInfo) {
                            $file = new SplFileInfo($key, $file->getPath(), $key);
                        }
                        yield $key => $file;
                    }
                })));
            }
        }
        if ($this->sort || $this->reverseSorting) {
            // Sorting materializes all results before yielding them back.
            $iterator = (new SortableIterator($iterator, $this->sort, $this->reverseSorting))->getIterator();
        }
        return $iterator;
    }
    /**
     * Appends an existing set of files/directories to the finder.
     *
     * The set can be another Finder, an Iterator, an IteratorAggregate, or even a plain array.
     *
     * @param iterable<SplFileInfo|\SplFileInfo|string> $iterator
     *
     * @return $this
     */
    public function append(iterable $iterator): static
    {
        $this->iterators[] = $iterator;
        return $this;
    }
    /**
     * Check if any results were found.
     */
    public function hasResults(): bool
    {
        // Stop at the first yielded entry instead of exhausting the iterator.
        foreach ($this->getIterator() as $_) {
            return true;
        }
        return false;
    }
    /**
     * Counts all the results collected by the iterators.
     */
    public function count(): int
    {
        return iterator_count($this->getIterator());
    }
    /**
     * Builds the iterator stack that scans a single directory and applies every
     * configured filter: exclusions, depth range, file type, name, content,
     * size, date, custom filters, path patterns and VCS ignore rules.
     */
    private function searchInDirectory(string $dir): \Iterator
    {
        $exclude = $this->exclude;
        $notPaths = $this->notPaths;

        // Pruned directories behave like explicit exclusions.
        if ($this->pruneFilters) {
            $exclude = array_merge($exclude, $this->pruneFilters);
        }

        if (static::IGNORE_VCS_FILES === (static::IGNORE_VCS_FILES & $this->ignore)) {
            $exclude = array_merge($exclude, self::$vcsPatterns);
        }

        if (static::IGNORE_DOT_FILES === (static::IGNORE_DOT_FILES & $this->ignore)) {
            $notPaths[] = '#(^|/)\..+(/|$)#';
        }

        // Translate the configured depth comparators into a [min, max] range.
        $minDepth = 0;
        $maxDepth = \PHP_INT_MAX;
        foreach ($this->depths as $comparator) {
            switch ($comparator->getOperator()) {
                case '>':
                    $minDepth = $comparator->getTarget() + 1;
                    break;
                case '>=':
                    $minDepth = $comparator->getTarget();
                    break;
                case '<':
                    $maxDepth = $comparator->getTarget() - 1;
                    break;
                case '<=':
                    $maxDepth = $comparator->getTarget();
                    break;
                default:
                    $minDepth = $maxDepth = $comparator->getTarget();
            }
        }

        $flags = \RecursiveDirectoryIterator::SKIP_DOTS;

        if ($this->followLinks) {
            $flags |= \RecursiveDirectoryIterator::FOLLOW_SYMLINKS;
        }

        if ($this->unixPaths) {
            $flags |= \RecursiveDirectoryIterator::UNIX_PATHS;
        }

        $iterator = new Iterator\RecursiveDirectoryIterator($dir, $flags, $this->ignoreUnreadableDirs);

        // Directory exclusions must be applied before the recursion is flattened,
        // so excluded subtrees are never descended into.
        if ($exclude) {
            $iterator = new ExcludeDirectoryFilterIterator($iterator, $exclude);
        }

        $iterator = new \RecursiveIteratorIterator($iterator, \RecursiveIteratorIterator::SELF_FIRST);

        if ($minDepth > 0 || $maxDepth < \PHP_INT_MAX) {
            $iterator = new DepthRangeFilterIterator($iterator, $minDepth, $maxDepth);
        }

        if ($this->mode) {
            $iterator = new Iterator\FileTypeFilterIterator($iterator, $this->mode);
        }

        if ($this->names || $this->notNames) {
            $iterator = new FilenameFilterIterator($iterator, $this->names, $this->notNames);
        }

        if ($this->contains || $this->notContains) {
            $iterator = new FilecontentFilterIterator($iterator, $this->contains, $this->notContains);
        }

        if ($this->sizes) {
            $iterator = new SizeRangeFilterIterator($iterator, $this->sizes);
        }

        if ($this->dates) {
            $iterator = new DateRangeFilterIterator($iterator, $this->dates);
        }

        if ($this->filters) {
            $iterator = new CustomFilterIterator($iterator, $this->filters);
        }

        if ($this->paths || $notPaths) {
            $iterator = new Iterator\PathFilterIterator($iterator, $this->paths, $notPaths);
        }

        if (static::IGNORE_VCS_IGNORED_FILES === (static::IGNORE_VCS_IGNORED_FILES & $this->ignore)) {
            $iterator = new Iterator\VcsIgnoredFilterIterator($iterator, $dir);
        }

        return $iterator;
    }
/**
* Normalizes given directory names by removing trailing slashes.
*
* Excluding: (s)ftp:// or ssh2.(s)ftp:// wrapper
*/
private function normalizeDir(string $dir): string
{
if ('/' === $dir) {
return $dir;
}
$dir = rtrim($dir, '/'.\DIRECTORY_SEPARATOR);
if (preg_match('#^(ssh2\.)?s?ftp://#', $dir)) {
$dir .= '/';
}
return $dir;
}
} | php | github | https://github.com/symfony/symfony | src/Symfony/Component/Finder/Finder.php |
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.gradle.tasks.bundling;
import java.io.File;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.gradle.api.Project;
import org.gradle.api.artifacts.ResolvedConfiguration;
import org.gradle.api.artifacts.component.ComponentArtifactIdentifier;
import org.gradle.api.artifacts.component.ComponentIdentifier;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
import org.gradle.api.artifacts.component.ProjectComponentIdentifier;
import org.gradle.api.artifacts.result.ResolvedArtifactResult;
import org.gradle.api.provider.ListProperty;
import org.gradle.api.provider.Provider;
import org.gradle.api.tasks.Classpath;
import org.gradle.api.tasks.Input;
import org.gradle.internal.component.external.model.ModuleComponentArtifactIdentifier;
import org.jspecify.annotations.Nullable;
import org.springframework.boot.loader.tools.LibraryCoordinates;
/**
 * Maps from {@link File} to {@link ComponentArtifactIdentifier}.
 *
 * @author Madhura Bhave
 * @author Scott Frederick
 * @author Phillip Webb
 * @author Paddy Drury
 * @author Andy Wilkinson
 */
class ResolvedDependencies {

    private final Map<String, LibraryCoordinates> projectCoordinatesByPath;

    private final ListProperty<ComponentArtifactIdentifier> artifactIds;

    private final ListProperty<File> artifactFiles;

    ResolvedDependencies(Project project) {
        this.artifactIds = project.getObjects().listProperty(ComponentArtifactIdentifier.class);
        this.artifactFiles = project.getObjects().listProperty(File.class);
        this.projectCoordinatesByPath = projectCoordinatesByPath(project);
    }

    /**
     * Collects the coordinates of every project in the build, keyed by project path,
     * so that project dependencies can later be mapped back to coordinates.
     */
    private static Map<String, LibraryCoordinates> projectCoordinatesByPath(Project project) {
        return project.getRootProject()
            .getAllprojects()
            .stream()
            .collect(Collectors.toMap(Project::getPath, ResolvedDependencies::libraryCoordinates));
    }

    private static LibraryCoordinates libraryCoordinates(Project project) {
        // Objects.toString guards against a null group or version.
        return LibraryCoordinates.of(Objects.toString(project.getGroup()), project.getName(),
                Objects.toString(project.getVersion()));
    }

    @Input
    ListProperty<ComponentArtifactIdentifier> getArtifactIds() {
        return this.artifactIds;
    }

    @Classpath
    ListProperty<File> getArtifactFiles() {
        return this.artifactFiles;
    }

    /**
     * Registers the resolved artifacts whose files and identifiers should be
     * tracked. The providers are mapped lazily, deferring dependency resolution
     * until the properties are actually queried.
     */
    void resolvedArtifacts(Provider<Set<ResolvedArtifactResult>> resolvedArtifacts) {
        this.artifactFiles.addAll(
                resolvedArtifacts.map((artifacts) -> artifacts.stream().map(ResolvedArtifactResult::getFile).toList()));
        this.artifactIds.addAll(
                resolvedArtifacts.map((artifacts) -> artifacts.stream().map(ResolvedArtifactResult::getId).toList()));
    }

    /**
     * Finds the descriptor for the dependency that produced the given artifact
     * {@code file}, or {@code null} when the file belongs to no known artifact.
     */
    @Nullable DependencyDescriptor find(File file) {
        ComponentArtifactIdentifier id = findArtifactIdentifier(file);
        if (id == null) {
            return null;
        }
        // External module dependency: coordinates come straight from the module id.
        if (id instanceof ModuleComponentArtifactIdentifier moduleComponentId) {
            ModuleComponentIdentifier moduleId = moduleComponentId.getComponentIdentifier();
            return new DependencyDescriptor(
                    LibraryCoordinates.of(moduleId.getGroup(), moduleId.getModule(), moduleId.getVersion()), false);
        }
        // Project dependency: look up the coordinates captured at construction time.
        ComponentIdentifier componentIdentifier = id.getComponentIdentifier();
        if (componentIdentifier instanceof ProjectComponentIdentifier projectComponentId) {
            String projectPath = projectComponentId.getProjectPath();
            LibraryCoordinates projectCoordinates = this.projectCoordinatesByPath.get(projectPath);
            if (projectCoordinates != null) {
                return new DependencyDescriptor(projectCoordinates, true);
            }
        }
        return null;
    }

    // artifactFiles and artifactIds are parallel lists: the identifier at index i
    // belongs to the file at index i (both populated together in resolvedArtifacts).
    private @Nullable ComponentArtifactIdentifier findArtifactIdentifier(File file) {
        List<File> files = this.artifactFiles.get();
        for (int i = 0; i < files.size(); i++) {
            if (file.equals(files.get(i))) {
                return this.artifactIds.get().get(i);
            }
        }
        return null;
    }

    /**
     * Describes a dependency in a {@link ResolvedConfiguration}.
     */
    static final class DependencyDescriptor {

        private final LibraryCoordinates coordinates;

        private final boolean projectDependency;

        private DependencyDescriptor(LibraryCoordinates coordinates, boolean projectDependency) {
            this.coordinates = coordinates;
            this.projectDependency = projectDependency;
        }

        LibraryCoordinates getCoordinates() {
            return this.coordinates;
        }

        boolean isProjectDependency() {
            return this.projectDependency;
        }

    }

}
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sorting operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class XlaSortOpTest(xla_test.XLATestCase):
  """Exercises the XLA lowerings of the sort and top-k operators."""

  def _assertOpOutputMatchesExpected(self, op, args, expected):
    """Builds `op` over placeholders fed with `args` and checks the outputs.

    Args:
      op: callable taking placeholder tensors and returning tensor(s).
      args: numpy arrays supplying both placeholder shapes/dtypes and feeds.
      expected: expected numpy outputs, compared with rtol=1e-3.
    """
    with self.cached_session() as session:
      with self.test_scope():
        placeholders = [
            array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
            for arg in args
        ]
        feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
        output = op(*placeholders)
        if isinstance(output, ops.Tensor):
          output = [output]

        results = session.run(output, feeds)
        for result, v in zip(results, expected):
          self.assertAllClose(v, result, rtol=1e-3)

  def testSort(self):
    # Sorting a shuffled permutation of 0..100 must recover the sorted range.
    supported_types = set(
        [dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
    for dtype in supported_types.intersection(self.numeric_types):
      x = np.arange(101, dtype=dtype)
      np.random.shuffle(x)
      self._assertOpOutputMatchesExpected(
          xla.sort, [x], expected=[np.arange(101, dtype=dtype)])

  def testKeyValueSort(self):
    # Values (the negated keys) must be permuted together with their keys.
    supported_types = set(
        [dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
    for key_type in supported_types.intersection(self.numeric_types):
      for value_type in supported_types.intersection(self.numeric_types):
        x = np.arange(101, dtype=key_type)
        np.random.shuffle(x)
        y = (-x).astype(value_type)
        self._assertOpOutputMatchesExpected(
            xla.key_value_sort, [x, y],
            expected=[
                np.arange(101, dtype=key_type),
                -np.arange(101, dtype=value_type)
            ])

  def testTopK(self):
    supported_types = set(
        [dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
    for dtype in supported_types.intersection(self.numeric_types):
      # Use small input size for bfloat16. Otherwise, we'll get duplicate values
      # after conversion to bfloat16, so the possible resulting index array is
      # no longer unique.
      if dtype == dtypes.bfloat16.as_numpy_dtype:
        array_size = 20
        k_options = [0, 1, 2, 10, 20]
      else:
        array_size = 200 * 1000
        k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
      for x in [np.arange(array_size)]:
        np.random.shuffle(x)
        for k in k_options:
          # Reference result: indices of the k largest values, descending.
          indices = x.argsort()[::-1][:k]

          def topk(v, k=k):
            return nn_ops.top_k(v, k=k, sorted=True)

          self._assertOpOutputMatchesExpected(
              topk, [x.astype(dtype)],
              expected=[x[indices].astype(dtype), indices])

  def testTopK2D(self):
    supported_types = set(
        [dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
    for dtype in supported_types.intersection(self.numeric_types):
      # Use small input size for bfloat16. Otherwise, we'll get duplicate values
      # after conversion to bfloat16, so the possible resulting index array is
      # no longer unique.
      if dtype == dtypes.bfloat16.as_numpy_dtype:
        array_size = 10
        k_options = [0, 1, 2, 10]
      else:
        array_size = 200 * 1000
        k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
      batch = 16
      for x in [np.arange(batch * array_size)]:
        np.random.shuffle(x)
        x = np.reshape(x, [batch, array_size])
        for k in k_options:
          # Per-row reference: slice the ascending argsort/sort backwards to
          # obtain the k largest entries of each row in descending order.
          indices = x.argsort(axis=1)[::, -1:-k - 1:-1]
          expected = np.sort(x, axis=1)[::, -1:-k - 1:-1]

          def topk(v, k=k):
            return nn_ops.top_k(v, k=k, sorted=True)

          self._assertOpOutputMatchesExpected(
              topk, [x.astype(dtype)],
              expected=[expected.astype(dtype), indices])

  def testTopKZeros(self):
    """Tests that positive and negative zeros sort correctly."""
    # Only bfloat16 is implemented.
    bfloat16 = dtypes.bfloat16.as_numpy_dtype
    if bfloat16 not in self.numeric_types:
      return

    with self.cached_session() as sess:
      p = array_ops.placeholder(dtypes.bfloat16)
      with self.test_scope():
        topk = nn_ops.top_k(p, k=4)
      results = sess.run(
          topk,
          {p: np.array([0., -0., 0., 3., -0., -4., 0., -0.], dtype=bfloat16)})
    # Positive zeros (indices 0, 2, 6) are expected to rank above the
    # negative zeros (indices 1, 4, 7).
    self.assertAllEqual(
        np.array([3., 0., 0., 0.], dtype=bfloat16), results[0])
    self.assertEqual(list([3, 0, 2, 6]), list(results[1]))

  def testTopKInfinities(self):
    """Tests that positive and negative infinity sort correctly."""
    # Only bfloat16 is implemented.
    bfloat16 = dtypes.bfloat16.as_numpy_dtype
    if bfloat16 not in self.numeric_types:
      return

    with self.cached_session() as sess:
      p = array_ops.placeholder(dtypes.bfloat16)
      with self.test_scope():
        topk = nn_ops.top_k(p, k=6)
      results = sess.run(topk, {
          p: np.array(
              [1, 2, float("inf"), -float("inf"), -1, -2], dtype=bfloat16)
      })
    self.assertAllEqual(
        np.array(
            [float("inf"), 2.0, 1.0, -1.0, -2.0, -float("inf")],
            dtype=bfloat16), results[0])
    self.assertEqual(list([2, 1, 0, 4, 5, 3]), list(results[1]))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
from xadmin.sites import AdminSite, site
# Package version as a [major, minor, patch] list.
VERSION = [0,4,4]

class Settings(object):
    """Marker base class: subclasses found in the XADMIN_CONF module are
    registered as xadmin settings by autodiscover()."""
    pass
def autodiscover():
    """
    Auto-discover INSTALLED_APPS admin.py modules and fail silently when
    not present. This forces an import on them to register any admin bits they
    may want.
    """
    from django.conf import settings
    from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule

    # Force the crispy-forms rendering settings used by xadmin's templates.
    setattr(settings, 'CRISPY_TEMPLATE_PACK', 'bootstrap3')
    setattr(settings, 'CRISPY_CLASS_CONVERTERS', {
        "textinput": "textinput textInput form-control",
        "fileinput": "fileinput fileUpload form-control",
        "passwordinput": "textinput textInput form-control",
    })

    from xadmin.views import register_builtin_views
    register_builtin_views(site)

    # load xadmin settings from XADMIN_CONF module
    try:
        xadmin_conf = getattr(settings, 'XADMIN_CONF', 'xadmin_conf.py')
        conf_mod = import_module(xadmin_conf)
    except Exception:
        # The settings module is optional; a missing or broken one is ignored.
        conf_mod = None

    if conf_mod:
        for key in dir(conf_mod):
            setting = getattr(conf_mod, key)
            try:
                # Only Settings subclasses are registered; issubclass() raises
                # TypeError for non-class attributes, which are skipped.
                if issubclass(setting, Settings):
                    site.register_settings(setting.__name__, setting)
            except Exception:
                pass

    from xadmin.plugins import register_builtin_plugins
    register_builtin_plugins(site)

    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        # Attempt to import the app's admin module.
        try:
            before_import_registry = site.copy_registry()
            import_module('%s.adminx' % app)
        except:
            # Reset the model registry to the state before the last import as
            # this import will have to reoccur on the next request and this
            # could raise NotRegistered and AlreadyRegistered exceptions
            # (see #8245).
            site.restore_registry(before_import_registry)

            # Decide whether to bubble up this error. If the app just
            # doesn't have an admin module, we can ignore the error
            # attempting to import it, otherwise we want it to bubble up.
            if module_has_submodule(mod, 'adminx'):
                raise
import os
import xapian
from djapian.utils.decorators import reopen_if_modified
class Database(object):
    """Wrapper managing a Xapian database stored at a filesystem path."""

    def __init__(self, path):
        self._path = path

    def open(self, write=False):
        """
        Opens database for manipulations
        """
        # Make sure the target directory exists before Xapian touches it.
        if not os.path.exists(self._path):
            os.makedirs(self._path)

        if write:
            return xapian.WritableDatabase(
                self._path,
                xapian.DB_CREATE_OR_OPEN,
            )

        try:
            return xapian.Database(self._path)
        except xapian.DatabaseOpeningError:
            # The on-disk database does not exist yet; create it, then retry.
            self.create_database()
            return xapian.Database(self._path)

    def create_database(self):
        # Opening a writable database with DB_CREATE_OR_OPEN materializes the
        # database files on disk; dropping the handle closes it again.
        handle = xapian.WritableDatabase(
            self._path,
            xapian.DB_CREATE_OR_OPEN,
        )
        del handle

    def document_count(self):
        database = self.open()
        counter = reopen_if_modified(database)(lambda: database.get_doccount())
        return counter()

    def clear(self):
        # Remove every file in the index directory, then the directory itself;
        # a missing directory or removal failure is silently ignored.
        try:
            for entry in os.listdir(self._path):
                os.remove(os.path.join(self._path, entry))
            os.rmdir(self._path)
        except OSError:
            pass
class CompositeDatabase(Database):
    """Read-only view combining several Database instances into one."""

    def __init__(self, dbs):
        self._dbs = dbs

    def open(self, write=False):
        if write:
            raise ValueError("Composite database cannot be opened for writing")

        # Open the first database and merge the remaining ones into it.
        combined = self._dbs[0].open()
        for extra in self._dbs[1:]:
            combined.add_database(extra.open())

        return combined

    def create_database(self):
        raise NotImplementedError

    def clear(self):
        raise NotImplementedError
# -*- coding: utf-8 -*-
# Copyright (c) 2002 - 2013 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the debug base class.
"""
import sys
import traceback
import bdb
import os
import types
import atexit
import inspect
from DebugProtocol import *
gRecursionLimit = 64
def printerr(s):
    """
    Module function used for debugging the debug client.
    
    @param s data to be printed
    """
    # Write to the original stderr (sys.__stderr__) so the output bypasses any
    # redirection installed by the debugged program; flush immediately so
    # messages are not lost if the client dies.  The redundant function-local
    # "import sys" was removed: sys is already imported at module level.
    sys.__stderr__.write('%s\n' % unicode(s))
    sys.__stderr__.flush()
def setRecursionLimit(limit):
    """
    Module function to set the recursion limit.
    
    @param limit recursion limit (integer)
    """
    # The limit is consumed by DebugBase.profile() on every 'call' event.
    global gRecursionLimit
    gRecursionLimit = limit
class DebugBase(bdb.Bdb):
"""
Class implementing base class of the debugger.
Provides simple wrapper methods around bdb for the 'owning' client to
call to step etc.
"""
    def __init__(self, dbgClient):
        """
        Constructor
        
        @param dbgClient the owning client
        """
        bdb.Bdb.__init__(self)

        self._dbgClient = dbgClient
        self._mainThread = 1
        
        # Breakpoints are shared with the owning client.
        self.breaks = self._dbgClient.breakpoints
        
        self.__event = ""
        self.__isBroken = ""
        self.cFrame = None
        
        # current frame we are at
        self.currentFrame = None
        self.currentFrameLocals = None
        
        # frame that we are stepping in, can be different than currentFrame
        self.stepFrame = None
        
        # provide a hook to perform a hard breakpoint
        # Use it like this:
        # if hasattr(sys, 'breakpoint'): sys.breakpoint()
        sys.breakpoint = self.set_trace
        
        # initialize parent
        bdb.Bdb.reset(self)
        
        self.__recursionDepth = -1
        self.setRecursionDepth(inspect.currentframe())
    def getCurrentFrame(self):
        """
        Public method to return the current frame.
        
        @return the current frame (None while the debuggee is running, see go())
        """
        return self.currentFrame
    def getCurrentFrameLocals(self):
        """
        Public method to return the locals dictionary of the current frame.
        
        @return locals dictionary of the current frame
        """
        # This is the snapshot captured in user_line()/user_exception().
        return self.currentFrameLocals
def step(self, traceMode):
"""
Public method to perform a step operation in this thread.
@param traceMode If it is non-zero, then the step is a step into,
otherwise it is a step over.
"""
self.stepFrame = self.currentFrame
if traceMode:
self.currentFrame = None
self.set_step()
else:
self.set_next(self.currentFrame)
    def stepOut(self):
        """
        Public method to perform a step out of the current call.
        """
        # Treated like a step of the caller's frame (see user_return()).
        self.stepFrame = self.currentFrame
        self.set_return(self.currentFrame)
    def go(self, special):
        """
        Public method to resume the thread.
        
        It resumes the thread stopping only at breakpoints or exceptions.
        
        @param special flag indicating a special continue operation
        """
        # Forget the current frame so stale state is not served while running.
        self.currentFrame = None
        self.set_continue(special)
def setRecursionDepth(self, frame):
"""
Public method to determine the current recursion depth.
@param frame The current stack frame.
"""
self.__recursionDepth = 0
while frame is not None:
self.__recursionDepth += 1
frame = frame.f_back
    def profile(self, frame, event, arg):
        """
        Public method used to trace some stuff independent of the debugger
        trace function.
        
        @param frame The current stack frame.
        @param event The trace event (string)
        @param arg The arguments
        """
        # Track the innermost frame and the recursion depth on every
        # call/return so gRecursionLimit can be enforced.
        if event == 'return':
            self.cFrame = frame.f_back
            self.__recursionDepth -= 1
        elif event == 'call':
            self.cFrame = frame
            self.__recursionDepth += 1
            if self.__recursionDepth > gRecursionLimit:
                raise RuntimeError('maximum recursion depth exceeded\n'
                    '(offending frame is two down the stack)')
def trace_dispatch(self, frame, event, arg):
"""
Reimplemented from bdb.py to do some special things.
This specialty is to check the connection to the debug server
for new events (i.e. new breakpoints) while we are going through
the code.
@param frame The current stack frame.
@param event The trace event (string)
@param arg The arguments
@return local trace function
"""
if self.quitting:
return # None
# give the client a chance to push through new break points.
self._dbgClient.eventPoll()
self.__event == event
self.__isBroken = False
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
if event == 'c_call':
return self.trace_dispatch
if event == 'c_exception':
return self.trace_dispatch
if event == 'c_return':
return self.trace_dispatch
print 'DebugBase.trace_dispatch: unknown debugging event:', `event`
return self.trace_dispatch
    def dispatch_line(self, frame):
        """
        Reimplemented from bdb.py to do some special things.
        
        This speciality is to check the connection to the debug server
        for new events (i.e. new breakpoints) while we are going through
        the code.
        
        @param frame The current stack frame.
        @return local trace function
        """
        if self.stop_here(frame) or self.break_here(frame):
            self.user_line(frame)
            # user_line() runs the client event loop; quitting may be set there.
            if self.quitting: raise bdb.BdbQuit
        return self.trace_dispatch
    def dispatch_return(self, frame, arg):
        """
        Reimplemented from bdb.py to handle passive mode cleanly.
        
        @param frame The current stack frame.
        @param arg The arguments
        @return local trace function
        """
        if self.stop_here(frame) or frame == self.returnframe:
            self.user_return(frame, arg)
            # In passive mode the program terminates by itself; do not quit.
            if self.quitting and not self._dbgClient.passive:
                raise bdb.BdbQuit
        return self.trace_dispatch
    def dispatch_exception(self, frame, arg):
        """
        Reimplemented from bdb.py to always call user_exception.
        
        @param frame The current stack frame.
        @param arg The arguments
        @return local trace function
        """
        # Report every exception not raised inside debugger-internal code.
        if not self.__skip_it(frame):
            self.user_exception(frame, arg)
            if self.quitting: raise bdb.BdbQuit
        return self.trace_dispatch
    def set_trace(self, frame = None):
        """
        Overridden method of bdb.py to do some special setup.
        
        @param frame frame to start debugging from
        """
        bdb.Bdb.set_trace(self, frame)
        # Install the profile hook too, so recursion depth stays tracked
        # (see profile()).
        sys.setprofile(self.profile)
    def set_continue(self, special):
        """
        Reimplemented from bdb.py to always get informed of exceptions.
        
        @param special flag indicating a special continue operation
        """
        # Modified version of the one found in bdb.py
        # Here we only set a new stop frame if it is a normal continue.
        if not special:
            self.stopframe = self.botframe
        # Tracing stays enabled (unlike bdb) so exceptions are still reported.
        self.returnframe = None
        self.quitting = 0
    def set_quit(self):
        """
        Public method to quit.
        
        It wraps call to bdb to clear the current frame properly.
        """
        self.currentFrame = None
        # Remove the profile hook installed by set_trace().
        sys.setprofile(None)
        bdb.Bdb.set_quit(self)
def fix_frame_filename(self, frame):
"""
Public method used to fixup the filename for a given frame.
The logic employed here is that if a module was loaded
from a .pyc file, then the correct .py to operate with
should be in the same path as the .pyc. The reason this
logic is needed is that when a .pyc file is generated, the
filename embedded and thus what is readable in the code object
of the frame object is the fully qualified filepath when the
pyc is generated. If files are moved from machine to machine
this can break debugging as the .pyc will refer to the .py
on the original machine. Another case might be sharing
code over a network... This logic deals with that.
@param frame the frame object
"""
# get module name from __file__
if frame.f_globals.has_key('__file__') and \
frame.f_globals['__file__'] and \
frame.f_globals['__file__'] == frame.f_code.co_filename:
root, ext = os.path.splitext(frame.f_globals['__file__'])
if ext == '.pyc' or ext == '.py' or ext == '.pyo':
fixedName = root + '.py'
if os.path.exists(fixedName):
return fixedName
return frame.f_code.co_filename
    def set_watch(self, cond, temporary=0):
        """
        Public method to set a watch expression.
        
        @param cond expression of the watch expression (string)
        @param temporary flag indicating a temporary watch expression (boolean)
        """
        # Watch expressions are stored as pseudo breakpoints on the virtual
        # file "Watch" at line 0.
        bp = bdb.Breakpoint("Watch", 0, temporary, cond)
        # A '??created??' or '??changed??' suffix selects special semantics
        # that are evaluated in __effective().
        if cond.endswith('??created??') or cond.endswith('??changed??'):
            bp.condition, bp.special = cond.split()
        else:
            bp.condition = cond
            bp.special = ""
        bp.values = {}
        # self.breaks["Watch"] counts the currently active watch expressions.
        if not self.breaks.has_key("Watch"):
            self.breaks["Watch"] = 1
        else:
            self.breaks["Watch"] += 1
    def clear_watch(self, cond):
        """
        Public method to clear a watch expression.
        
        @param cond expression of the watch expression to be cleared (string)
        """
        try:
            possibles = bdb.Breakpoint.bplist["Watch", 0]
            for i in range(0, len(possibles)):
                b = possibles[i]
                if b.cond == cond:
                    b.deleteMe()
                    # Keep the active watch counter in sync (see set_watch()).
                    self.breaks["Watch"] -= 1
                    if self.breaks["Watch"] == 0:
                        del self.breaks["Watch"]
                    break
        except KeyError:
            # No watch expressions are registered at all.
            pass
def get_watch(self, cond):
"""
Public method to get a watch expression.
@param cond expression of the watch expression to be cleared (string)
"""
possibles = bdb.Breakpoint.bplist["Watch", 0]
for i in range(0, len(possibles)):
b = possibles[i]
if b.cond == cond:
return b
    def __do_clearWatch(self, cond):
        """
        Private method called to clear a temporary watch expression.
        
        @param cond expression of the watch expression to be cleared (string)
        """
        self.clear_watch(cond)
        # Tell the debug server that the temporary watch expression is gone.
        self._dbgClient.write('%s%s\n' % (ResponseClearWatch, cond))
    def __effective(self, frame):
        """
        Private method to determine, if a watch expression is effective.
        
        @param frame the current execution frame
        @return tuple of watch expression and a flag to indicate, that a temporary
            watch expression may be deleted (bdb.Breakpoint, boolean)
        """
        possibles = bdb.Breakpoint.bplist["Watch", 0]
        for i in range(0, len(possibles)):
            b = possibles[i]
            if b.enabled == 0:
                continue
            if not b.cond:
                # watch expression without expression shouldn't occur, just ignore it
                continue
            try:
                val = eval(b.condition, frame.f_globals, frame.f_locals)
                if b.special:
                    # b.values[frame] is per-frame bookkeeping:
                    # [seen flag, last value, remaining ignore count].
                    if b.special == '??created??':
                        if b.values[frame][0] == 0:
                            b.values[frame][0] = 1
                            b.values[frame][1] = val
                            return (b, 1)
                        else:
                            continue
                    b.values[frame][0] = 1
                    if b.special == '??changed??':
                        if b.values[frame][1] != val:
                            b.values[frame][1] = val
                            if b.values[frame][2] > 0:
                                b.values[frame][2] -= 1
                                continue
                            else:
                                return (b, 1)
                        else:
                            continue
                    continue
                if val:
                    if b.ignore > 0:
                        b.ignore -= 1
                        continue
                    else:
                        return (b, 1)
            except:
                # Evaluation failed; for special watches initialize the
                # bookkeeping so the expression counts as "not created yet".
                if b.special:
                    try:
                        b.values[frame][0] = 0
                    except KeyError:
                        b.values[frame] = [0, None, b.ignore]
                continue
        return (None, None)
    def break_here(self, frame):
        """
        Reimplemented from bdb.py to fix the filename from the frame.
        
        See fix_frame_filename for more info.
        
        @param frame the frame object
        @return flag indicating the break status (boolean)
        """
        filename = self.canonic(self.fix_frame_filename(frame))
        if not self.breaks.has_key(filename) and not self.breaks.has_key("Watch"):
            return 0
        
        # First check ordinary line breakpoints for this file.
        if self.breaks.has_key(filename):
            lineno = frame.f_lineno
            if lineno in self.breaks[filename]:
                # flag says ok to delete temp. bp
                (bp, flag) = bdb.effective(filename, lineno, frame)
                if bp:
                    self.currentbp = bp.number
                    if (flag and bp.temporary):
                        self.__do_clear(filename, lineno)
                    return 1
        
        # Then evaluate the watch expressions.
        if self.breaks.has_key("Watch"):
            # flag says ok to delete temp. bp
            (bp, flag) = self.__effective(frame)
            if bp:
                self.currentbp = bp.number
                if (flag and bp.temporary):
                    self.__do_clearWatch(bp.cond)
                return 1
        
        return 0
    def break_anywhere(self, frame):
        """
        Reimplemented from bdb.py to do some special things.
        
        These speciality is to fix the filename from the frame
        (see fix_frame_filename for more info).
        
        @param frame the frame object
        @return flag indicating the break status (boolean)
        """
        # Active watch expressions force tracing of every file.
        return self.breaks.has_key(
            self.canonic(self.fix_frame_filename(frame))) or \
            (self.breaks.has_key("Watch") and self.breaks["Watch"])
    def get_break(self, filename, lineno):
        """
        Reimplemented from bdb.py to get the first breakpoint of a particular line.
        
        Because eric4 supports only one breakpoint per line, this overwritten
        method will return this one and only breakpoint.
        
        @param filename the filename of the bp to retrieve (string)
        @param lineno the linenumber of the bp to retrieve (integer)
        @return breakpoint or None, if there is no bp
        """
        filename = self.canonic(filename)
        # and/or chaining yields the breakpoint object or None.
        return self.breaks.has_key(filename) and \
            lineno in self.breaks[filename] and \
            bdb.Breakpoint.bplist[filename, lineno][0] or None
    def __do_clear(self, filename, lineno):
        """
        Private method called to clear a temporary breakpoint.
        
        @param filename name of the file the bp belongs to
        @param lineno linenumber of the bp
        """
        self.clear_break(filename, lineno)
        # Inform the debug server so the UI can drop the breakpoint marker.
        self._dbgClient.write('%s%s,%d\n' % (ResponseClearBreak, filename, lineno))
def getStack(self):
"""
Public method to get the stack.
@return list of lists with file name (string), line number (integer)
and function name (string)
"""
fr = self.cFrame
stack = []
while fr is not None:
fname = self._dbgClient.absPath(self.fix_frame_filename(fr))
fline = fr.f_lineno
ffunc = fr.f_code.co_name
if ffunc == '?':
ffunc = ''
stack.append([fname, fline, ffunc])
if fr == self._dbgClient.mainFrame:
fr = None
else:
fr = fr.f_back
return stack
    def user_line(self, frame):
        """
        Reimplemented to handle the program about to execute a particular line.
        
        @param frame the frame object
        """
        line = frame.f_lineno

        # We never stop on line 0.
        if line == 0:
            return

        fn = self._dbgClient.absPath(self.fix_frame_filename(frame))

        # See if we are skipping at the start of a newly loaded program.
        if self._dbgClient.mainFrame is None:
            if fn != self._dbgClient.getRunning():
                return
            self._dbgClient.mainFrame = frame

        self.currentFrame = frame
        self.currentFrameLocals = frame.f_locals
        # remember the locals because it is reinitialized when accessed
        
        fr = frame
        stack = []
        while fr is not None:
            # Reset the trace function so we can be sure
            # to trace all functions up the stack... This gets around
            # problems where an exception/breakpoint has occurred
            # but we had disabled tracing along the way via a None
            # return from dispatch_call
            fr.f_trace = self.trace_dispatch
            fname = self._dbgClient.absPath(self.fix_frame_filename(fr))
            fline = fr.f_lineno
            ffunc = fr.f_code.co_name
            
            # '?' marks module level code; report an empty function name.
            if ffunc == '?':
                ffunc = ''
            
            stack.append([fname, fline, ffunc])
            
            # Stop at the frame the debugged program was started in.
            if fr == self._dbgClient.mainFrame:
                fr = None
            else:
                fr = fr.f_back
        
        self.__isBroken = True
        
        # Report the stop position, then wait for commands from the server.
        self._dbgClient.write('%s%s\n' % (ResponseLine, unicode(stack)))
        self._dbgClient.eventLoop()
    def user_exception(self,frame,(exctype,excval,exctb),unhandled=0):
        """
        Reimplemented to report an exception to the debug server.
        
        @param frame the frame object
        @param exctype the type of the exception
        @param excval data about the exception
        @param exctb traceback for the exception
        @param unhandled flag indicating an uncaught exception
        """
        if exctype in [SystemExit, bdb.BdbQuit]:
            # Normal program termination: run the exit handlers and report
            # the exit code to the server.
            atexit._run_exitfuncs()
            if excval is None:
                excval = 0
            elif isinstance(excval, (unicode, str)):
                self._dbgClient.write(excval)
                excval = 1
            if isinstance(excval, int):
                self._dbgClient.progTerminated(excval)
            else:
                self._dbgClient.progTerminated(excval.code)
            return
        elif exctype in [SyntaxError, IndentationError]:
            try:
                message, (filename, linenr, charnr, text) = excval
            except ValueError:
                exclist = []
            else:
                exclist = [message, [filename, linenr, charnr]]
            
            self._dbgClient.write("%s%s\n" % (ResponseSyntax, unicode(exclist)))
        else:
            # Old- and new-style classes carry the exception name differently.
            if type(exctype) in [types.ClassType,   # Python up to 2.4
                                 types.TypeType]:   # Python 2.5+
                exctype = exctype.__name__
            
            if excval is None:
                excval = ''
            
            if unhandled:
                exctypetxt = "unhandled %s" % unicode(exctype)
            else:
                exctypetxt = unicode(exctype)
            # Encode the value in the client's configured coding if possible.
            try:
                exclist = [exctypetxt,
                    unicode(excval).encode(self._dbgClient.getCoding())]
            except TypeError:
                exclist = [exctypetxt, str(excval)]
            
            if exctb:
                frlist = self.__extract_stack(exctb)
                frlist.reverse()
                
                self.currentFrame = frlist[0]
                self.currentFrameLocals = frlist[0].f_locals
                # remember the locals because it is reinitialized when accessed
                
                for fr in frlist:
                    filename = self._dbgClient.absPath(self.fix_frame_filename(fr))
                    linenr = fr.f_lineno
                    
                    # Stop unwinding once debugger internals are reached.
                    if os.path.basename(filename).startswith("DebugClient") or \
                       os.path.basename(filename) == "bdb.py":
                        break
                    
                    exclist.append([filename, linenr])
            
            self._dbgClient.write("%s%s\n" % (ResponseException, unicode(exclist)))
        
        if exctb is None:
            return
        
        # Wait for commands (e.g. variable inspection) from the server.
        self._dbgClient.eventLoop()
def __extract_stack(self, exctb):
"""
Private member to return a list of stack frames.
@param exctb exception traceback
@return list of stack frames
"""
tb = exctb
stack = []
while tb is not None:
stack.append(tb.tb_frame)
tb = tb.tb_next
tb = None
return stack
def user_return(self,frame,retval):
"""
Reimplemented to report program termination to the debug server.
@param frame the frame object
@param retval the return value of the program
"""
# The program has finished if we have just left the first frame.
if frame == self._dbgClient.mainFrame and \
self._mainThread:
atexit._run_exitfuncs()
self._dbgClient.progTerminated(retval)
elif frame is not self.stepFrame:
self.stepFrame = None
self.user_line(frame)
def stop_here(self,frame):
"""
Reimplemented to filter out debugger files.
Tracing is turned off for files that are part of the
debugger that are called from the application being debugged.
@param frame the frame object
@return flag indicating whether the debugger should stop here
"""
if self.__skip_it(frame):
return 0
return bdb.Bdb.stop_here(self,frame)
def __skip_it(self, frame):
"""
Private method to filter out debugger files.
Tracing is turned off for files that are part of the
debugger that are called from the application being debugged.
@param frame the frame object
@return flag indicating whether the debugger should skip this frame
"""
fn = self.fix_frame_filename(frame)
# Eliminate things like <string> and <stdin>.
if fn[0] == '<':
return 1
#XXX - think of a better way to do this. It's only a convience for
#debugging the debugger - when the debugger code is in the current
#directory.
if os.path.basename(fn) in [\
'AsyncFile.py', 'AsyncIO.py',
'DebugConfig.py', 'DCTestResult.py',
'DebugBase.py', 'DebugClientBase.py',
'DebugClientCapabilities.py', 'DebugClient.py',
'DebugClientThreads.py', 'DebugProtocol.py',
'DebugThread.py', 'FlexCompleter.py',
'PyProfile.py'] or \
os.path.dirname(fn).endswith("coverage"):
return 1
if self._dbgClient.shouldSkip(fn):
return 1
return 0
def isBroken(self):
"""
Public method to return the broken state of the debugger.
@return flag indicating the broken state (boolean)
"""
return self.__isBroken
def getEvent(self):
"""
Public method to return the last debugger event.
@return last debugger event (string)
"""
return self.__event | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <aim@secoya.dk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module metadata consumed by Ansible's plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# DOCUMENTATION and EXAMPLES below are YAML data parsed by Ansible's doc
# tooling (ansible-doc); they are not executed as code.
DOCUMENTATION = '''
---
module: sensu_check
short_description: Manage Sensu checks
version_added: 2.0
description:
  - Manage the checks that should be run on a machine by I(Sensu).
  - Most options do not have a default and will not be added to the check definition unless specified.
  - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
  - they are simply specified for your convenience.
options:
  name:
    description:
      - The name of the check
      - This is the key that is used to determine whether a check exists
    required: true
  state:
    description:
      - Whether the check should be present or not
    choices: [ 'present', 'absent' ]
    required: false
    default: present
  path:
    description:
      - Path to the json file of the check to be added/removed.
      - Will be created if it does not exist (unless I(state=absent)).
      - The parent folders need to exist when I(state=present), otherwise an error will be thrown
    required: false
    default: /etc/sensu/conf.d/checks.json
  backup:
    description:
      - Create a backup file (if yes), including the timestamp information so
      - you can get the original file back if you somehow clobbered it incorrectly.
    choices: [ 'yes', 'no' ]
    required: false
    default: no
  command:
    description:
      - Path to the sensu check to run (not required when I(state=absent))
    required: true
  handlers:
    description:
      - List of handlers to notify when the check fails
    required: false
    default: []
  subscribers:
    description:
      - List of subscribers/channels this check should run for
      - See sensu_subscribers to subscribe a machine to a channel
    required: false
    default: []
  interval:
    description:
      - Check interval in seconds
    required: false
    default: null
  timeout:
    description:
      - Timeout for the check
    required: false
    default: 10
  handle:
    description:
      - Whether the check should be handled or not
    choices: [ 'yes', 'no' ]
    required: false
    default: yes
  subdue_begin:
    description:
      - When to disable handling of check failures
    required: false
    default: null
  subdue_end:
    description:
      - When to enable handling of check failures
    required: false
    default: null
  dependencies:
    description:
      - Other checks this check depends on, if dependencies fail,
      - handling of this check will be disabled
    required: false
    default: []
  metric:
    description:
      - Whether the check is a metric
    choices: [ 'yes', 'no' ]
    required: false
    default: no
  standalone:
    description:
      - Whether the check should be scheduled by the sensu client or server
      - This option obviates the need for specifying the I(subscribers) option
    choices: [ 'yes', 'no' ]
    required: false
    default: no
  publish:
    description:
      - Whether the check should be scheduled at all.
      - You can still issue it via the sensu api
    choices: [ 'yes', 'no' ]
    required: false
    default: yes
  occurrences:
    description:
      - Number of event occurrences before the handler should take action
    required: false
    default: 1
  refresh:
    description:
      - Number of seconds handlers should wait before taking second action
    required: false
    default: null
  aggregate:
    description:
      - Classifies the check as an aggregate check,
      - making it available via the aggregate API
    choices: [ 'yes', 'no' ]
    required: false
    default: no
  low_flap_threshold:
    description:
      - The low threshhold for flap detection
    required: false
    default: null
  high_flap_threshold:
    description:
      - The high threshhold for flap detection
    required: false
    default: null
  custom:
    version_added: "2.1"
    description:
      - A hash/dictionary of custom parameters for mixing to the configuration.
      - You can't rewrite others module parameters using this
    required: false
    default: {}
  source:
    version_added: "2.1"
    description:
      - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
    required: false
    default: null
requirements: [ ]
author: "Anders Ingemann (@andsens)"
'''
EXAMPLES = '''
# Fetch metrics about the CPU load every 60 seconds,
# the sensu server has a handler called 'relay' which forwards stats to graphite
- name: get cpu metrics
  sensu_check:
    name: cpu_load
    command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
    metric: yes
    handlers: relay
    subscribers: common
    interval: 60
# Check whether nginx is running
- name: check nginx process
  sensu_check:
    name: nginx_running
    command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
    handlers: default
    subscribers: nginx
    interval: 60
# Stop monitoring the disk capacity.
# Note that the check will still show up in the sensu dashboard,
# to remove it completely you need to issue a DELETE request to the sensu api.
- name: check disk
  sensu_check:
    name: check_disk_capacity
    state: absent
'''
# Prefer the stdlib json; fall back to simplejson for very old Pythons.
try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        # Let snippet from module_utils/basic.py return a proper error in this case
        pass
def sensu_check(module, path, name, state='present', backup=False):
    """Create, update or delete the named check in a Sensu JSON config file.

    The file at ``path`` is read (missing file == empty config unless
    ``state='absent'``), the check definition is reconciled against the
    module parameters, and the file is rewritten when anything changed and
    check mode is off.

    :param module: AnsibleModule instance (params, check_mode, fail_json)
    :param path: path of the JSON file holding the check definitions
    :param name: name of the check to manage
    :param state: 'present' or 'absent'
    :param backup: if True, back up the file before rewriting it
    :return: (changed, reasons) tuple describing what was (or would be) done
    """
    changed = False
    reasons = []

    stream = None
    try:
        try:
            stream = open(path, 'r')
            config = json.load(stream)
        except IOError:
            e = get_exception()
            if e.errno == 2:  # ENOENT: a missing file is non-fatal
                if state == 'absent':
                    reasons.append('file did not exist and state is `absent\'')
                    return changed, reasons
                config = {}
            else:
                module.fail_json(msg=str(e))
        except ValueError:
            msg = '{path} contains invalid JSON'.format(path=path)
            module.fail_json(msg=msg)
    finally:
        if stream:
            stream.close()

    if 'checks' not in config:
        if state == 'absent':
            reasons.append('`checks\' section did not exist and state is `absent\'')
            return changed, reasons
        config['checks'] = {}
        changed = True
        reasons.append('`checks\' section did not exist')

    if state == 'absent':
        if name in config['checks']:
            del config['checks'][name]
            changed = True
            reasons.append('check was present and state is `absent\'')

    if state == 'present':
        if name not in config['checks']:
            check = {}
            config['checks'][name] = check
            changed = True
            reasons.append('check was absent and state is `present\'')
        else:
            check = config['checks'][name]

        # Options that map 1:1 from module parameters to check attributes.
        simple_opts = ['command',
                       'handlers',
                       'subscribers',
                       'interval',
                       'timeout',
                       'handle',
                       'dependencies',
                       'standalone',
                       'publish',
                       'occurrences',
                       'refresh',
                       'aggregate',
                       'low_flap_threshold',
                       'high_flap_threshold',
                       'source',
                       ]
        for opt in simple_opts:
            if module.params[opt] is not None:
                if opt not in check or check[opt] != module.params[opt]:
                    check[opt] = module.params[opt]
                    changed = True
                    reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
            else:
                if opt in check:
                    del check[opt]
                    changed = True
                    reasons.append('`{opt}\' was removed'.format(opt=opt))

        if module.params['custom']:
            # Apply free-form custom parameters, refusing to shadow the
            # regular module options or the managed special keys.
            custom_params = module.params['custom']
            overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
            if overwrited_fields:
                msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwrited_fields))
                module.fail_json(msg=msg)
            for k, v in custom_params.items():
                if k in config['checks'][name]:
                    if not config['checks'][name][k] == v:
                        changed = True
                        reasons.append('`custom param {opt}\' was changed'.format(opt=k))
                else:
                    changed = True
                    reasons.append('`custom param {opt}\' was added'.format(opt=k))
                check[k] = v
            simple_opts += list(custom_params.keys())
            # Remove obsolete custom params (the set() above is a copy, so
            # deleting while iterating it is safe).
            for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
                changed = True
                reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
                del check[opt]

        # A metric check carries type == 'metric'; remove the key otherwise.
        if module.params['metric']:
            if 'type' not in check or check['type'] != 'metric':
                check['type'] = 'metric'
                changed = True
                reasons.append('`type\' was not defined or not `metric\'')
        if not module.params['metric'] and 'type' in check:
            del check['type']
            changed = True
            reasons.append('`type\' was defined')

        # subdue_begin/subdue_end are required together (enforced in main()).
        if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
            subdue = {'begin': module.params['subdue_begin'],
                      'end': module.params['subdue_end'],
                      }
            if 'subdue' not in check or check['subdue'] != subdue:
                check['subdue'] = subdue
                changed = True
                reasons.append('`subdue\' did not exist or was different')
        else:
            if 'subdue' in check:
                del check['subdue']
                changed = True
                reasons.append('`subdue\' was removed')

    if changed and not module.check_mode:
        if backup:
            module.backup_local(path)
        try:
            try:
                stream = open(path, 'w')
                stream.write(json.dumps(config, indent=2) + '\n')
            except IOError:
                e = get_exception()
                module.fail_json(msg=str(e))
        finally:
            if stream:
                stream.close()
    return changed, reasons
def main():
    """Module entry point: build the argument spec, validate parameters and
    hand off to sensu_check()."""
    arg_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default='/etc/sensu/conf.d/checks.json'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        backup=dict(type='bool', default='no'),
        command=dict(type='str'),
        handlers=dict(type='list'),
        subscribers=dict(type='list'),
        interval=dict(type='int'),
        timeout=dict(type='int'),
        handle=dict(type='bool'),
        subdue_begin=dict(type='str'),
        subdue_end=dict(type='str'),
        dependencies=dict(type='list'),
        metric=dict(type='bool', default='no'),
        standalone=dict(type='bool'),
        publish=dict(type='bool'),
        occurrences=dict(type='int'),
        refresh=dict(type='int'),
        aggregate=dict(type='bool'),
        low_flap_threshold=dict(type='int'),
        high_flap_threshold=dict(type='int'),
        custom=dict(type='dict'),
        source=dict(type='str'),
    )

    module = AnsibleModule(argument_spec=arg_spec,
                           required_together=[['subdue_begin', 'subdue_end']],
                           supports_check_mode=True)

    # 'command' is mandatory unless we are removing the check.
    if module.params['state'] != 'absent' and module.params['command'] is None:
        module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))

    params = module.params
    changed, reasons = sensu_check(module, params['path'], params['name'],
                                   params['state'], params['backup'])

    module.exit_json(path=params['path'], changed=changed, msg='OK',
                     name=params['name'], reasons=reasons)
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
"""
A sub-package for efficiently dealing with polynomials.
Within the documentation for this sub-package, a "finite power series,"
i.e., a polynomial (also referred to simply as a "series") is represented
by a 1-D numpy array of the polynomial's coefficients, ordered from lowest
order term to highest. For example, array([1,2,3]) represents
``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial
applicable to the specific module in question, e.g., `polynomial` (which
"wraps" the "standard" basis) or `chebyshev`. For optimal performance,
all operations on polynomials, including evaluation at an argument, are
implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
"""
from __future__ import division, absolute_import, print_function
from .polynomial import Polynomial
from .chebyshev import Chebyshev
from .legendre import Legendre
from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
from numpy.testing._private.pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import time
import calendar
import json
import pytz
import inspect
import sys
import threading
import django
from django.db import models
from django.utils.timezone import now
from django.db.models import *
from django.db import transaction
from django.forms.models import model_to_dict
from django.utils import timezone
from django.core.exceptions import PermissionDenied
from cgi import escape as html_escape
from django.db.models.deletion import Collector
from django.db import router
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MaxValueValidator, MinValueValidator
from xoskafka import XOSKafkaProducer
from xosconfig import Config
from multistructlog import create_logger
log = create_logger(Config().get("logging"))
# Global toggle for XOS's default security policy evaluation.
XOS_GLOBAL_DEFAULT_SECURITY_POLICY = True
def get_first_site():
    # Hackish solution to Node.site needing a default
    # NOTE(review): `from site import Site` relies on Python 2's implicit
    # relative import of a sibling site.py module; under Python 3 this would
    # resolve to the stdlib `site` module (which has no Site class) -- confirm
    # the intended import path before porting.
    from site import Site
    # Returns the id of an arbitrary (first) Site row as the default.
    return Site.objects.first().id
def json_handler(obj):
    """Fallback serializer passed as ``default=`` to json.dumps().

    Maps the Django/XOS objects that json cannot serialize natively onto
    JSON-friendly values; anything unrecognized is returned unchanged.
    """
    if isinstance(obj, pytz.tzfile.DstTzInfo):
        # json can't serialize DstTzInfo
        return str(obj)
    if hasattr(obj, "timetuple"):
        # date/datetime objects become UTC epoch seconds
        return calendar.timegm(obj.timetuple())
    if isinstance(obj, QuerySet):
        # django 1.11.0 - model_to_dict() turns reverse foreign relations into querysets
        return [member.id for member in obj]
    if isinstance(obj, Model):
        # django 1.11.10 - model_to_dict() turns reverse foreign relations into lists of models
        return obj.id
    return obj
class StrippedCharField(models.CharField):
    """CharField whose clean() trims leading and trailing whitespace."""

    def clean(self, value, *args, **kwds):
        stripped = value.strip() if value is not None else value
        return super(StrippedCharField, self).clean(stripped, *args, **kwds)
# This manager will be inherited by all subclasses because
# the core model is abstract.
class XOSBaseDeletionManager(models.Manager):
    """Manager that yields only soft-deleted rows (deleted=True).

    Inherited by all subclasses because the core model is abstract.
    """

    def get_queryset(self):
        base = super(XOSBaseDeletionManager, self)
        if hasattr(base, "get_queryset"):
            qs = base.get_queryset()
        else:
            # pre-1.6 django spelling
            qs = base.get_query_set()
        return qs.filter(deleted=True)

    def get_query_set(self):
        # deprecated in django 1.7 in favor of get_queryset()
        return self.get_queryset()
# This manager will be inherited by all subclasses because
# the core model is abstract.
class XOSBaseManager(models.Manager):
    """Default manager that hides soft-deleted rows (deleted=False).

    Inherited by all subclasses because the core model is abstract.
    """

    def get_queryset(self):
        base = super(XOSBaseManager, self)
        if hasattr(base, "get_queryset"):
            qs = base.get_queryset()
        else:
            # pre-1.6 django spelling
            qs = base.get_query_set()
        return qs.filter(deleted=False)

    def get_query_set(self):
        # deprecated in django 1.7 in favor of get_queryset()
        return self.get_queryset()
class PlModelMixIn(object):
    """Change-tracking and backend-state helpers for XOS Django models.

    Provides utilities for computing which fields of a model instance have
    changed since it was loaded -- subclasses must snapshot
    ``self._initial = self._dict`` in their ``__init__`` -- plus helpers for
    validators, the JSON-encoded ``backend_register`` scratchpad, and
    message-bus serialization.

    This is broken out of XOSBase into a Mixin so the User model can
    also make use of it.
    """

    @property
    def _dict(self):
        """Current concrete field values as a plain dict."""
        return model_to_dict(self, fields=[field.name for field in self._meta.fields])

    def fields_differ(self, f1, f2):
        """Compare two field values.

        A timezone-aware and a naive datetime are always considered
        different (comparing them directly would raise TypeError).
        """
        if (
            isinstance(f1, datetime.datetime)
            and isinstance(f2, datetime.datetime)
            and (timezone.is_aware(f1) != timezone.is_aware(f2))
        ):
            return True
        else:
            return f1 != f2

    @property
    def diff(self):
        """Dict of changed fields: name -> (initial value, current value)."""
        d1 = self._initial
        d2 = self._dict
        diffs = [(k, (v, d2[k])) for k, v in d1.items() if self.fields_differ(v, d2[k])]
        return dict(diffs)

    @property
    def has_changed(self):
        """True if any field differs from its initial snapshot."""
        return bool(self.diff)

    @property
    def changed_fields(self):
        """Names of changed fields (all fields for a not-yet-saved object)."""
        if self.is_new:
            return self._dict.keys()
        return self.diff.keys()

    @property
    def is_new(self):
        """True for objects that have not been saved yet."""
        return self.pk is None

    def has_field_changed(self, field_name):
        """True if the named field differs from its initial snapshot."""
        return field_name in self.diff.keys()

    def get_field_diff(self, field_name):
        """Return the (initial, current) tuple for a field, or None."""
        return self.diff.get(field_name, None)

    @classmethod
    def get_model_class_by_name(cls, name):
        """Look up a registered Django model class by its class name."""
        all_models = django.apps.apps.get_models(include_auto_created=False)
        all_models_by_name = {}
        for model in all_models:
            all_models_by_name[model.__name__] = model
        return all_models_by_name.get(name)

    @property
    def leaf_model(self):
        """Return this object reloaded as its most-derived model class.

        Models carrying a ``leaf_model_name`` attribute may actually be a
        base-class view of a more specific row; re-fetch through the leaf
        class (using the deleted_objects manager for soft-deleted rows).
        """
        leaf_model_name = getattr(self, "leaf_model_name", None)
        if not leaf_model_name:
            return self
        if leaf_model_name == self.__class__.__name__:
            return self
        leaf_model_class = self.get_model_class_by_name(self.leaf_model_name)
        assert self.id
        if self.deleted:
            return leaf_model_class.deleted_objects.get(id=self.id)
        else:
            return leaf_model_class.objects.get(id=self.id)

    # classmethod
    # NOTE(review): the @classmethod decorator above appears to have been
    # commented out at some point; callers must invoke this on an instance.
    def getValidators(cls):
        """ primarily for REST API, return a dictionary of field names mapped
            to lists of the type of validations that need to be applied to
            those fields.
        """
        validators = {}
        for field in cls._meta.fields:
            rules = []
            if not field.blank:
                rules.append("notBlank")
            if field.__class__.__name__ == "URLField":
                rules.append("url")
            validators[field.name] = rules
        return validators

    def get_backend_register(self, k, default=None):
        """Read one key from the JSON backend_register scratchpad."""
        try:
            return json.loads(self.backend_register).get(k, default)
        except AttributeError:
            return default

    def set_backend_register(self, k, v):
        """Write one key into the JSON backend_register scratchpad."""
        br = {}
        try:
            br = json.loads(self.backend_register)
        except AttributeError:
            br = {}
        br[k] = v
        self.backend_register = json.dumps(br)

    def get_backend_details(self):
        """Return (exponent, last_success, last_failure, failures) from the
        backend_register scratchpad; missing entries come back as None."""
        try:
            scratchpad = json.loads(self.backend_register)
        except AttributeError:
            return (None, None, None, None)
        try:
            exponent = scratchpad["exponent"]
        except KeyError:
            exponent = None
        try:
            last_success_time = scratchpad["last_success"]
            dt = datetime.datetime.fromtimestamp(last_success_time)
            last_success = dt.strftime("%Y-%m-%d %H:%M")
        except KeyError:
            last_success = None
        try:
            failures = scratchpad["failures"]
        except KeyError:
            failures = None
        try:
            last_failure_time = scratchpad["last_failure"]
            dt = datetime.datetime.fromtimestamp(last_failure_time)
            last_failure = dt.strftime("%Y-%m-%d %H:%M")
        except KeyError:
            last_failure = None
        return (exponent, last_success, last_failure, failures)

    def get_backend_icon(self):
        """Map backend_status/enacted state to a (icon_name, tooltip) pair."""
        # "1 -" prefix means fully successful, "0 -" means in progress/ok.
        is_perfect = (
            self.backend_status is not None
        ) and self.backend_status.startswith("1 -")
        is_good = (self.backend_status is not None) and (
            self.backend_status.startswith("0 -")
            or self.backend_status.startswith("1 -")
        )
        is_provisioning = (
            self.backend_status is None
            or self.backend_status == "Provisioning in progress"
            or self.backend_status == ""
        )
        if (
            (self.enacted is not None)
            and (self.enacted >= self.updated and is_good)
            or is_perfect
        ):
            return ("success", "successfully enacted")
        else:
            if is_good or is_provisioning:
                return (
                    "clock",
                    "Pending sync, last_status = "
                    + html_escape(self.backend_status, quote=True),
                )
            else:
                return ("error", html_escape(self.backend_status, quote=True))

    def enforce_choices(self, field, choices):
        """Raise unless ``field`` equals one of the allowed choice values."""
        choices = [x[0] for x in choices]
        for choice in choices:
            if field == choice:
                return
            if (choice is None) and (field == ""):
                # allow "" and None to be equivalent
                return
        raise Exception("Field value %s is not in %s" % (field, str(choices)))

    def serialize_for_messagebus(self):
        """ Serialize the object for posting to messagebus.

            The API serializes ForeignKey fields by naming them <name>_id
            whereas model_to_dict leaves them with the original name. Modify
            the results of model_to_dict to provide the same fieldnames.
        """
        field_types = {}
        for f in self._meta.fields:
            field_types[f.name] = f.get_internal_type()
        fields = model_to_dict(self)
        # iterate over a copy of the keys -- we mutate `fields` in the loop
        # (deleting while iterating .keys() raises RuntimeError on Python 3)
        for k in list(fields.keys()):
            if field_types.get(k, None) == "ForeignKey":
                new_key_name = "%s_id" % k
                if (k in fields) and (new_key_name not in fields):
                    fields[new_key_name] = fields[k]
                    del fields[k]
        return fields

    def push_messagebus_event(self, deleted=False, pk=None):
        """Publish this object's state to the message bus (kafka)."""
        self.push_kafka_event(deleted, pk)

    def push_kafka_event(self, deleted=False, pk=None):
        """Transmit a create/update/delete notification via kafka."""
        model = self.serialize_for_messagebus()
        bases = inspect.getmro(self.__class__)
        class_names = ",".join([x.__name__ for x in bases])
        model["class_names"] = class_names
        if not pk:
            pk = self.pk
        json_dict = {"pk": pk, "changed_fields": self.changed_fields, "object": model}
        if deleted:
            json_dict["deleted"] = True
            json_dict["object"]["id"] = pk
        topic = "xos.gui_events"
        key = self.__class__.__name__
        json_value = json.dumps(json_dict, default=json_handler)
        XOSKafkaProducer.produce(topic, key, json_value)
class AttributeMixin(object):
    """Mixin for models that stash extra settings in a JSON-encoded
    ``service_specific_attribute`` field."""

    def get_attribute(self, name, default=None):
        """Return one value from the JSON attribute blob (or default)."""
        raw = self.service_specific_attribute
        attrs = json.loads(raw) if raw else {}
        return attrs.get(name, default)

    def set_attribute(self, name, value):
        """Store one value into the JSON attribute blob."""
        raw = self.service_specific_attribute
        attrs = json.loads(raw) if raw else {}
        attrs[name] = value
        self.service_specific_attribute = json.dumps(attrs)

    def get_initial_attribute(self, name, default=None):
        """Like get_attribute(), but read from the _initial snapshot."""
        raw = self._initial["service_specific_attribute"]
        attrs = json.loads(raw) if raw else {}
        return attrs.get(name, default)

    @classmethod
    def get_default_attribute(cls, name):
        """Return the declared default for a simple or class-level attribute."""
        for (attr, default) in cls.simple_attributes:
            if attr == name:
                return default
        if hasattr(cls, "default_attributes") and name in cls.default_attributes:
            return cls.default_attributes[name]
        return None

    @classmethod
    def setup_simple_attributes(cls):
        """Install a property for each (name, default) in simple_attributes
        that proxies to get_attribute()/set_attribute()."""
        for (attr, default) in cls.simple_attributes:
            # bind the current loop values as lambda defaults
            getter = lambda self, attrname=attr, default=default: \
                self.get_attribute(attrname, default)
            setter = lambda self, value, attrname=attr: \
                self.set_attribute(attrname, value)
            setattr(cls, attr, property(getter, setter, None, attr))
# For cascading deletes, we need a Collector that doesn't do fastdelete,
# so we get a full list of models.
class XOSCollector(Collector):
    """Collector that never takes django's fast-delete shortcut, so
    cascading deletes always yield the full list of affected models."""

    def can_fast_delete(self, *args, **kwargs):
        # Always force the slow path.
        return False
class ModelLink:
    """Lightweight record describing a link between two models."""

    def __init__(self, dest, via, into=None):
        # keep parameter names/order: callers may pass them as keywords
        self.dest, self.via, self.into = dest, via, into
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chris Hoffman <christopher.hoffman@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by Ansible's plugin loader.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
# DOCUMENTATION and EXAMPLES below are YAML data parsed by Ansible's doc
# tooling (ansible-doc); they are not executed as code.
DOCUMENTATION = '''
---
module: npm
short_description: Manage node.js packages with npm
description:
  - Manage node.js packages with Node Package Manager (npm)
version_added: 1.2
author: "Chris Hoffman (@chrishoffman)"
options:
  name:
    description:
      - The name of a node.js library to install
    required: false
  path:
    description:
      - The base path where to install the node.js libraries
    required: false
  version:
    description:
      - The version to be installed
    required: false
  global:
    description:
      - Install the node.js library globally
    required: false
    default: no
    choices: [ "yes", "no" ]
  executable:
    description:
      - The executable location for npm.
      - This is useful if you are using a version manager, such as nvm
    required: false
  ignore_scripts:
    description:
      - Use the --ignore-scripts flag when installing.
    required: false
    choices: [ "yes", "no" ]
    default: no
    version_added: "1.8"
  production:
    description:
      - Install dependencies in production mode, excluding devDependencies
    required: false
    choices: [ "yes", "no" ]
    default: no
  registry:
    description:
      - The registry to install modules from.
    required: false
    version_added: "1.6"
  state:
    description:
      - The state of the node.js library
    required: false
    default: present
    choices: [ "present", "absent", "latest" ]
'''
EXAMPLES = '''
description: Install "coffee-script" node.js package.
- npm:
    name: coffee-script
    path: /app/location
description: Install "coffee-script" node.js package on version 1.6.1.
- npm:
    name: coffee-script
    version: '1.6.1'
    path: /app/location
description: Install "coffee-script" node.js package globally.
- npm:
    name: coffee-script
    global: yes
description: Remove the globally package "coffee-script".
- npm:
    name: coffee-script
    global: yes
    state: absent
description: Install "coffee-script" node.js package from custom registry.
- npm:
    name: coffee-script
    registry: 'http://registry.mysite.com'
description: Install packages based on package.json.
- npm:
    path: /app/location
description: Update packages based on package.json to their latest version.
- npm:
    path: /app/location
    state: latest
description: Install packages based on package.json using the npm installed with nvm v0.10.1.
- npm:
    path: /app/location
    executable: /opt/nvm/v0.10.1/bin/npm
    state: present
'''
import os
# Prefer the stdlib json; fall back to simplejson for very old Pythons.
try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        # Let snippet from module_utils/basic.py return a proper error in this case
        pass
class Npm(object):
    """Thin wrapper around the npm CLI.

    Builds npm command lines from the module parameters and parses npm's
    JSON output to determine which packages are installed, missing or
    outdated.
    """

    def __init__(self, module, **kwargs):
        self.module = module
        self.glbl = kwargs['glbl']
        self.name = kwargs['name']
        self.version = kwargs['version']
        self.path = kwargs['path']
        self.registry = kwargs['registry']
        self.production = kwargs['production']
        self.ignore_scripts = kwargs['ignore_scripts']

        if kwargs['executable']:
            # "executable" may carry extra arguments, e.g. "nvm exec npm".
            self.executable = kwargs['executable'].split(' ')
        else:
            self.executable = [module.get_bin_path('npm', True)]

        if kwargs['version']:
            self.name_version = self.name + '@' + str(self.version)
        else:
            self.name_version = self.name

    def _exec(self, args, run_in_check_mode=False, check_rc=True):
        """Run npm with the given args; return stdout ('' when skipped
        because of check mode)."""
        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
            cmd = self.executable + args

            if self.glbl:
                cmd.append('--global')
            if self.production:
                cmd.append('--production')
            if self.ignore_scripts:
                cmd.append('--ignore-scripts')
            if self.name:
                cmd.append(self.name_version)
            if self.registry:
                cmd.append('--registry')
                cmd.append(self.registry)

            # If path is specified, cd into that path and run the command,
            # creating the directory first if necessary.
            cwd = None
            if self.path:
                if not os.path.exists(self.path):
                    os.makedirs(self.path)
                if not os.path.isdir(self.path):
                    self.module.fail_json(msg="path %s is not a directory" % self.path)
                cwd = self.path

            rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
            return out
        return ''

    def list(self):
        """Return (installed, missing) package-name lists derived from
        `npm list --json`."""
        cmd = ['list', '--json']

        installed = []
        missing = []
        data = json.loads(self._exec(cmd, True, False))
        if 'dependencies' in data:
            for dep in data['dependencies']:
                if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
                    missing.append(dep)
                elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
                    missing.append(dep)
                else:
                    installed.append(dep)
            if self.name and self.name not in installed:
                # Named dependency not installed at all.
                missing.append(self.name)
        else:
            missing.append(self.name)
        return installed, missing

    def install(self):
        """Run `npm install`."""
        return self._exec(['install'])

    def update(self):
        """Run `npm update`."""
        return self._exec(['update'])

    def uninstall(self):
        """Run `npm uninstall`."""
        return self._exec(['uninstall'])

    def list_outdated(self):
        """Return the names of outdated packages from `npm outdated`."""
        outdated = []

        data = self._exec(['outdated'], True, False)
        for dep in data.splitlines():
            if dep:
                # node.js v0.10.22 changed the `npm outdated` module separator
                # from "@" to " ". Split on both for backwards compatibility.
                pkg, other = re.split(r'\s|@', dep, maxsplit=1)
                outdated.append(pkg)

        return outdated
def main():
    """Module entry point: build the argument spec, validate parameters and
    drive the Npm helper for the requested state."""
    arg_spec = dict(
        name=dict(default=None),
        path=dict(default=None, type='path'),
        version=dict(default=None),
        production=dict(default='no', type='bool'),
        executable=dict(default=None, type='path'),
        registry=dict(default=None),
        state=dict(default='present', choices=['present', 'absent', 'latest']),
        ignore_scripts=dict(default=False, type='bool'),
    )
    # 'global' is a python keyword, so it cannot appear in the dict() call above.
    arg_spec['global'] = dict(default='no', type='bool')
    module = AnsibleModule(
        argument_spec=arg_spec,
        supports_check_mode=True
    )

    params = module.params
    name = params['name']
    path = params['path']
    glbl = params['global']
    state = params['state']

    if not path and not glbl:
        module.fail_json(msg='path must be specified when not using global')
    if state == 'absent' and not name:
        module.fail_json(msg='uninstalling a package is only available for named packages')

    npm = Npm(module,
              name=name,
              path=path,
              version=params['version'],
              glbl=glbl,
              production=params['production'],
              executable=params['executable'],
              registry=params['registry'],
              ignore_scripts=params['ignore_scripts'])

    changed = False
    if state == 'present':
        installed, missing = npm.list()
        if missing:
            changed = True
            npm.install()
    elif state == 'latest':
        installed, missing = npm.list()
        outdated = npm.list_outdated()
        if missing:
            changed = True
            npm.install()
        if outdated:
            changed = True
            npm.update()
    else:
        # state == 'absent'
        installed, missing = npm.list()
        if name in installed:
            changed = True
            npm.uninstall()

    module.exit_json(changed=changed)
# import module snippets
# NOTE: the wildcard import is standard Ansible module boilerplate; it
# supplies AnsibleModule and assorted helpers at module load time.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
# ext/orderinglist.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`_orm.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position")
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
s.bullets.append(Bullet())
s.bullets[1].position
>>> 1
s.bullets.insert(1, Bullet())
s.bullets[2].position
>>> 2
The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that the
list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the
:func:`_orm.relationship` against the target ordering attribute, so that the
ordering is correct when first loaded.
.. warning::
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Operations
that are unsupported or are problematic include:
* two entries must trade values. This is not supported directly in the
case of a primary key or unique constraint because it means at least
one row would need to be temporarily removed first, or changed to
a third, neutral value while the switch occurs.
* an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
single flush. In the case of a primary key, it will trade
an INSERT/DELETE of the same primary key for an UPDATE statement in order
to lessen the impact of this limitation, however this does not take place
for a UNIQUE column.
A future feature will allow the "DELETE before INSERT" behavior to be
possible, alleviating this limitation, though this feature will require
explicit configuration at the mapper level for sets of columns that
are to be handled in this way.
:func:`.ordering_list` takes the name of the related object's ordering
attribute as an argument. By default, the zero-based integer index of the
object's position in the :func:`.ordering_list` is synchronized with the
ordering attribute: index 0 will get position 0, index 1 position 1, etc. To
start numbering at 1 or some other integer, provide ``count_from=1``.
"""
from ..orm.collections import collection
from ..orm.collections import collection_adapter
__all__ = ["ordering_list"]
def ordering_list(attr, count_from=None, **kw):
    """Build an :class:`OrderingList` factory for a ``collection_class``.

    The returned zero-argument callable produces a new :class:`.OrderingList`
    each time the relationship instantiates its collection, e.g.::

        bullets = relationship("Bullet", order_by="Bullet.position",
                               collection_class=ordering_list('position'))

    :param attr:
        Name of the mapped attribute that stores each element's position.
    :param count_from:
        Optional integer origin for the numbering; e.g. ``count_from=1``
        produces a 1-based sequence stored in *attr*. Ignored when an
        explicit ``ordering_func`` keyword is supplied.

    Any remaining keyword arguments are forwarded to the
    :class:`.OrderingList` constructor.
    """
    kw = _unsugar_count_from(count_from=count_from, **kw)

    def factory():
        return OrderingList(attr, **kw)

    return factory
# Ordering utility functions
def count_from_0(index, collection):
    """Zero-based numbering: the stored value equals the list index."""
    return index
def count_from_1(index, collection):
    """One-based numbering: the stored value is the list index plus one."""
    return 1 + index
def count_from_n_factory(start):
    """Numbering-function factory: consecutive integers beginning at *start*."""
    def f(index, collection):
        # stored position is simply the index shifted by the requested origin
        return start + index
    try:
        # give the generated function a descriptive name when possible
        f.__name__ = "count_from_%i" % start
    except TypeError:
        pass
    return f
def _unsugar_count_from(**kw):
    """Translate a ``count_from`` keyword into an ``ordering_func``.

    Pops ``count_from`` out of the keyword dict and, when no explicit
    ``ordering_func`` was supplied, installs the matching counting function.
    Every other keyword passes through untouched.
    """
    start = kw.pop("count_from", None)
    if start is None or kw.get("ordering_func", None) is not None:
        return kw
    if start == 0:
        kw["ordering_func"] = count_from_0
    elif start == 1:
        kw["ordering_func"] = count_from_1
    else:
        kw["ordering_func"] = count_from_n_factory(start)
    return kw
class OrderingList(list):
    """A custom list that manages position information for its children.
    The :class:`.OrderingList` object is normally set up using the
    :func:`.ordering_list` factory function, used in conjunction with
    the :func:`_orm.relationship` function.
    """
    def __init__(
        self, ordering_attr=None, ordering_func=None, reorder_on_append=False
    ):
        """A custom list that manages position information for its children.
        ``OrderingList`` is a ``collection_class`` list implementation that
        syncs position in a Python list with a position attribute on the
        mapped objects.
        This implementation relies on the list starting in the proper order,
        so be **sure** to put an ``order_by`` on your relationship.
        :param ordering_attr:
            Name of the attribute that stores the object's order in the
            relationship.
        :param ordering_func: Optional. A function that maps the position in
            the Python list to a value to store in the
            ``ordering_attr``. Values returned are usually (but need not be!)
            integers.
            An ``ordering_func`` is called with two positional parameters: the
            index of the element in the list, and the list itself.
            If omitted, Python list indexes are used for the attribute values.
            Two basic pre-built numbering functions are provided in this module:
            ``count_from_0`` and ``count_from_1``. For more exotic examples
            like stepped numbering, alphabetical and Fibonacci numbering, see
            the unit tests.
        :param reorder_on_append:
            Default False. When appending an object with an existing (non-None)
            ordering value, that value will be left untouched unless
            ``reorder_on_append`` is true. This is an optimization to avoid a
            variety of dangerous unexpected database writes.
            SQLAlchemy will add instances to the list via append() when your
            object loads. If for some reason the result set from the database
            skips a step in the ordering (say, row '1' is missing but you get
            '2', '3', and '4'), reorder_on_append=True would immediately
            renumber the items to '1', '2', '3'. If you have multiple sessions
            making changes, any of whom happen to load this collection even in
            passing, all of the sessions would try to "clean up" the numbering
            in their commits, possibly causing all but one to fail with a
            concurrent modification error.
            Recommend leaving this with the default of False, and just call
            ``reorder()`` if you're doing ``append()`` operations with
            previously ordered instances or when doing some housekeeping after
            manual sql operations.
        """
        self.ordering_attr = ordering_attr
        # Default numbering is the plain zero-based list index.
        if ordering_func is None:
            ordering_func = count_from_0
        self.ordering_func = ordering_func
        self.reorder_on_append = reorder_on_append
    # More complex serialization schemes (multi column, e.g.) are possible by
    # subclassing and reimplementing these two methods.
    def _get_order_value(self, entity):
        return getattr(entity, self.ordering_attr)
    def _set_order_value(self, entity, value):
        setattr(entity, self.ordering_attr, value)
    def reorder(self):
        """Synchronize ordering for the entire collection.
        Sweeps through the list and ensures that each object has accurate
        ordering information set.
        """
        for index, entity in enumerate(self):
            self._order_entity(index, entity, True)
    # As of 0.5, _reorder is no longer semi-private
    _reorder = reorder
    def _order_entity(self, index, entity, reorder=True):
        # Write ordering_func(index) onto the entity, skipping the setattr
        # when the attribute already holds the correct value.
        have = self._get_order_value(entity)
        # Don't disturb existing ordering if reorder is False
        if have is not None and not reorder:
            return
        should_be = self.ordering_func(index, self)
        if have != should_be:
            self._set_order_value(entity, should_be)
    def append(self, entity):
        # Only the appended entity is (possibly) renumbered; see the
        # reorder_on_append discussion in __init__.
        super(OrderingList, self).append(entity)
        self._order_entity(len(self) - 1, entity, self.reorder_on_append)
    def _raw_append(self, entity):
        """Append without any ordering behavior."""
        super(OrderingList, self).append(entity)
    # Register _raw_append with the collection instrumentation as an adder
    # whose appended item is positional argument 1.
    _raw_append = collection.adds(1)(_raw_append)
    def insert(self, index, entity):
        # Insertion shifts everything after it, so renumber the whole list.
        super(OrderingList, self).insert(index, entity)
        self._reorder()
    def remove(self, entity):
        super(OrderingList, self).remove(entity)
        # Only renumber when this list is still the collection referenced by
        # the owning object. NOTE(review): presumably this skips reordering
        # while the collection is being bulk-replaced -- confirm against the
        # collection_adapter semantics.
        adapter = collection_adapter(self)
        if adapter and adapter._referenced_by_owner:
            self._reorder()
    def pop(self, index=-1):
        entity = super(OrderingList, self).pop(index)
        self._reorder()
        return entity
    def __setitem__(self, index, entity):
        if isinstance(index, slice):
            # Normalize slice bounds and assign element-by-element so every
            # assigned item is renumbered. NOTE(review): a negative ``step``
            # is not normalized here.
            step = index.step or 1
            start = index.start or 0
            if start < 0:
                start += len(self)
            stop = index.stop or len(self)
            if stop < 0:
                stop += len(self)
            for i in range(start, stop, step):
                self.__setitem__(i, entity[i])
        else:
            self._order_entity(index, entity, True)
            super(OrderingList, self).__setitem__(index, entity)
    def __delitem__(self, index):
        super(OrderingList, self).__delitem__(index)
        self._reorder()
    # Python 2 slice-protocol hooks; Python 3 routes slice operations through
    # __setitem__/__delitem__ above.
    def __setslice__(self, start, end, values):
        super(OrderingList, self).__setslice__(start, end, values)
        self._reorder()
    def __delslice__(self, start, end):
        super(OrderingList, self).__delslice__(start, end)
        self._reorder()
    def __reduce__(self):
        # Pickle support: rebuilt by _reconstitute from (class, instance
        # __dict__, list contents).
        return _reconstitute, (self.__class__, self.__dict__, list(self))
    # Copy list's docstrings onto overriding methods that lack their own.
    for func_name, func in list(locals().items()):
        if (
            callable(func)
            and func.__name__ == func_name
            and not func.__doc__
            and hasattr(list, func_name)
        ):
            func.__doc__ = getattr(list, func_name).__doc__
    del func_name, func
def _reconstitute(cls, dict_, items):
"""Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj | unknown | codeparrot/codeparrot-clean | ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package logical
import (
"context"
"crypto"
"io"
wrapping "github.com/hashicorp/go-kms-wrapping/v2"
)
//go:generate enumer -type=KeyUsage -trimprefix=KeyUsage -transform=snake
// KeyUsage identifies a cryptographic operation that a managed key may be
// permitted to perform. Values start at 1, so the zero value is never a
// valid usage.
type KeyUsage int
const (
	KeyUsageEncrypt KeyUsage = 1 + iota
	KeyUsageDecrypt
	KeyUsageSign
	KeyUsageVerify
	KeyUsageWrap
	KeyUsageUnwrap
	KeyUsageGenerateRandom
)
// ManagedKey is the common interface for keys whose material is held by an
// external system (e.g. a KMS or HSM) rather than directly by Vault.
type ManagedKey interface {
	// Name is a human-readable identifier for a managed key that may change or be renamed. Use UUID if a
	// long term consistent identifier is needed.
	Name() string
	// UUID is a unique identifier for a managed key that is guaranteed to remain
	// consistent even if a key is migrated or renamed.
	UUID() string
	// Present returns true if the key is established in the KMS. This may return false if for example
	// an HSM library is not configured on all cluster nodes.
	Present(ctx context.Context) (bool, error)
	// AllowsAll returns true if all the requested usages are supported by the managed key.
	AllowsAll(usages []KeyUsage) bool
}
// Consumer callbacks: each receives a managed key that is only valid for the
// duration of the callback invocation (see ManagedKeySystemView).
type (
	ManagedKeyConsumer             func(context.Context, ManagedKey) error
	ManagedSigningKeyConsumer      func(context.Context, ManagedSigningKey) error
	ManagedEncryptingKeyConsumer   func(context.Context, ManagedEncryptingKey) error
	ManagedMACKeyConsumer          func(context.Context, ManagedMACKey) error
	ManagedKeyRandomSourceConsumer func(context.Context, ManagedKeyRandomSource) error
)
// ManagedKeySystemView provides lookup of managed keys by name or UUID,
// scoping each retrieved key to the supplied consumer callback.
type ManagedKeySystemView interface {
	// WithManagedKeyByName retrieves an instantiated managed key for consumption by the given function. The
	// provided key can only be used within the scope of that function call
	WithManagedKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedKeyConsumer) error
	// WithManagedKeyByUUID retrieves an instantiated managed key for consumption by the given function. The
	// provided key can only be used within the scope of that function call
	WithManagedKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedKeyConsumer) error
	// WithManagedSigningKeyByName retrieves an instantiated managed signing key for consumption by the given function,
	// with the same semantics as WithManagedKeyByName
	WithManagedSigningKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedSigningKeyConsumer) error
	// WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function,
	// with the same semantics as WithManagedKeyByUUID
	WithManagedSigningKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedSigningKeyConsumer) error
	// WithManagedEncryptingKeyByName retrieves an instantiated managed encrypting key for consumption by the given
	// function, with the same semantics as WithManagedKeyByName
	WithManagedEncryptingKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedEncryptingKeyConsumer) error
	// WithManagedEncryptingKeyByUUID retrieves an instantiated managed encrypting key for consumption by the given
	// function, with the same semantics as WithManagedKeyByUUID
	WithManagedEncryptingKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedEncryptingKeyConsumer) error
	// WithManagedMACKeyByName retrieves an instantiated managed MAC key by name for consumption by the given function,
	// with the same semantics as WithManagedKeyByName.
	WithManagedMACKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedMACKeyConsumer) error
	// WithManagedMACKeyByUUID retrieves an instantiated managed MAC key by UUID for consumption by the given function,
	// with the same semantics as WithManagedKeyByUUID.
	WithManagedMACKeyByUUID(ctx context.Context, keyUUID, backendUUID string, f ManagedMACKeyConsumer) error
}
// ManagedAsymmetricKey is a managed key with a retrievable public half.
type ManagedAsymmetricKey interface {
	ManagedKey
	// GetPublicKey returns the public portion of the key pair.
	GetPublicKey(ctx context.Context) (crypto.PublicKey, error)
}
// ManagedKeyLifecycle is implemented by managed keys whose material can be
// created on demand in the KMS.
type ManagedKeyLifecycle interface {
	// GenerateKey generates a key in the KMS if it didn't yet exist, returning the id.
	// If it already existed, returns the existing id. KMSKey's key material is ignored if present.
	GenerateKey(ctx context.Context) (string, error)
}
// ManagedSigningKey is an asymmetric managed key capable of signing data and
// verifying signatures.
type ManagedSigningKey interface {
	ManagedAsymmetricKey
	// Sign returns a digital signature of the provided value. The SignerOpts param must provide the hash function
	// that generated the value (if any).
	// The optional randomSource specifies the source of random values and may be ignored by the implementation
	// (such as on HSMs with their own internal RNG)
	Sign(ctx context.Context, value []byte, randomSource io.Reader, opts crypto.SignerOpts) ([]byte, error)
	// Verify verifies the provided signature against the value. The SignerOpts param must provide the hash function
	// that generated the value (if any).
	// If true is returned the signature is correct, false otherwise.
	Verify(ctx context.Context, signature, value []byte, opts crypto.SignerOpts) (bool, error)
	// GetSigner returns an implementation of crypto.Signer backed by the managed key. This should be called
	// as needed so as to use per request contexts.
	GetSigner(context.Context) (crypto.Signer, error)
}
// ManagedEncryptingKey is a managed key capable of encrypting and decrypting
// data.
type ManagedEncryptingKey interface {
	ManagedKey
	Encrypt(ctx context.Context, plaintext []byte, options ...wrapping.Option) ([]byte, error)
	Decrypt(ctx context.Context, ciphertext []byte, options ...wrapping.Option) ([]byte, error)
}
// ManagedMACKey is a managed key capable of generating message
// authentication codes.
type ManagedMACKey interface {
	ManagedKey
	// MAC generates a MAC tag using the provided algorithm for the provided value.
	MAC(ctx context.Context, algorithm string, data []byte) ([]byte, error)
}
// ManagedKeyRandomSource is a managed key that can produce random bytes.
type ManagedKeyRandomSource interface {
	ManagedKey
	// GetRandomBytes returns a number (specified by the count parameter) of random bytes sourced from the target managed key.
	GetRandomBytes(count int) ([]byte, error)
}
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import socket
import datetime
import time
import sys
import re
import binascii
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
# just because we can import it on Linux doesn't mean we will use it
except ImportError:
pass
DOCUMENTATION = '''
---
module: wait_for
short_description: Waits for a condition before continuing.
description:
- Waiting for a port to become available is useful for when services
are not immediately available after their init scripts return -
which is true of certain Java application servers. It is also
useful when starting guests with the M(virt) module and
needing to pause until they are ready.
    - This module can also be used to wait for a regex match on a string to be present in a file.
- In 1.6 and later, this module can
also be used to wait for a file to be available or absent on the
filesystem.
- In 1.8 and later, this module can also be used to wait for active
connections to be closed before continuing, useful if a node
is being rotated out of a load balancer pool.
version_added: "0.7"
options:
host:
description:
- hostname or IP address to wait for
required: false
default: "127.0.0.1"
aliases: []
timeout:
description:
- maximum number of seconds to wait for
required: false
default: 300
delay:
description:
- number of seconds to wait before starting to poll
required: false
default: 0
port:
description:
- port number to poll
required: false
state:
description:
- either C(present), C(started), or C(stopped), C(absent), or C(drained)
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections
- When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed
choices: [ "present", "started", "stopped", "absent", "drained" ]
default: "started"
path:
version_added: "1.4"
required: false
description:
      - path to a file on the filesystem that must exist before continuing
search_regex:
version_added: "1.4"
required: false
description:
- Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex.
exclude_hosts:
version_added: "1.8"
required: false
description:
- list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state
notes:
- The ability to use search_regex with a port connection was added in 1.7.
requirements: []
author: Jeroen Hoekx, John Jarvis, Andrii Radyk
'''
EXAMPLES = '''
# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds
- wait_for: port=8000 delay=10
# wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds
- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained
# wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts
- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3
# wait until the file /tmp/foo is present before continuing
- wait_for: path=/tmp/foo
# wait until the string "completed" is in the file /tmp/foo before continuing
- wait_for: path=/tmp/foo search_regex=completed
# wait until the lock file is removed
- wait_for: path=/var/lock/file.lock state=absent
# wait until the process is finished and pid was destroyed
- wait_for: path=/proc/3466/status state=absent
# Wait 300 seconds for port 22 to become open and contain "OpenSSH", don't start checking for 10 seconds
- local_action: wait_for port=22 host="{{ inventory_hostname }}" search_regex=OpenSSH delay=10
'''
class TCPConnectionInfo(object):
    """
    This is a generic TCP Connection Info strategy class that relies
    on the psutil module, which is not ideal for targets, but necessary
    for cross platform support.
    A subclass may wish to override some or all of these methods.
    - _get_exclude_ips()
    - get_active_connections()
    All subclasses MUST define platform and distribution (which may be None).
    """
    platform = 'Generic'
    distribution = None
    # Wildcard bind addresses per family: a socket bound to one of these
    # matches connections arriving on any local IP.
    match_all_ips = {
        socket.AF_INET: '0.0.0.0',
        socket.AF_INET6: '::',
    }
    # Only connections in these states count as "active" for draining.
    connection_states = {
        '01': 'ESTABLISHED',
        '02': 'SYN_SENT',
        '03': 'SYN_RECV',
        '04': 'FIN_WAIT1',
        '05': 'FIN_WAIT2',
        '06': 'TIME_WAIT',
    }
    def __new__(cls, *args, **kwargs):
        # Dispatch to a platform-specific subclass when one matches.
        return load_platform_subclass(TCPConnectionInfo, args, kwargs)
    def __init__(self, module):
        self.module = module
        (self.family, self.ip) = _convert_host_to_ip(self.module.params['host'])
        self.port = int(self.module.params['port'])
        self.exclude_ips = self._get_exclude_ips()
        if not HAS_PSUTIL:
            module.fail_json(msg="psutil module required for wait_for")
    def _get_exclude_ips(self):
        """Return the resolved IPs of hosts whose connections are ignored."""
        if self.module.params['exclude_hosts'] is None:
            return []
        # BUGFIX: exclude_hosts is declared with type='list', so Ansible has
        # already converted any comma-separated string into a list; calling
        # .split(',') on it raised AttributeError.
        exclude_hosts = self.module.params['exclude_hosts']
        # BUGFIX: psutil reports remote addresses as dotted-quad/IPv6 text,
        # so compare against resolved IPs -- the previous /proc-style hex
        # conversion (_convert_host_to_hex) could never match here.
        return [ _convert_host_to_ip(h)[1] for h in exclude_hosts ]
    def get_active_connections_count(self):
        """Count established/half-open connections to the watched ip:port."""
        active_connections = 0
        for p in psutil.process_iter():
            connections = p.get_connections(kind='inet')
            for conn in connections:
                if conn.status not in self.connection_states.values():
                    continue
                (local_ip, local_port) = conn.local_address
                if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]:
                    (remote_ip, remote_port) = conn.remote_address
                    if remote_ip not in self.exclude_ips:
                        active_connections += 1
        return active_connections
# ===========================================
# Subclass: Linux
class LinuxTCPConnectionInfo(TCPConnectionInfo):
    """
    This is a TCP Connection Info evaluation strategy class
    that utilizes information from Linux's procfs. While less universal,
    does allow Linux targets to not require an additional library.
    """
    platform = 'Linux'
    distribution = None
    source_file = {
        socket.AF_INET: '/proc/net/tcp',
        socket.AF_INET6: '/proc/net/tcp6'
    }
    match_all_ips = {
        socket.AF_INET: '00000000',
        socket.AF_INET6: '00000000000000000000000000000000',
    }
    # Column offsets within a whitespace-split /proc/net/tcp* row.
    local_address_field = 1
    remote_address_field = 2
    connection_state_field = 3
    def __init__(self, module):
        self.module = module
        (self.family, self.ip) = _convert_host_to_hex(module.params['host'])
        self.port = "%0.4X" % int(module.params['port'])
        self.exclude_ips = self._get_exclude_ips()
    def _get_exclude_ips(self):
        """Return the /proc-style hex addresses of hosts to ignore."""
        if self.module.params['exclude_hosts'] is None:
            return []
        # BUGFIX: type='list' means this already arrives as a list (the old
        # .split(',') raised AttributeError), and only the hex-address half
        # of the (family, hex) tuple returned by _convert_host_to_hex can
        # ever match the remote addresses parsed out of /proc/net/tcp*.
        exclude_hosts = self.module.params['exclude_hosts']
        return [ _convert_host_to_hex(h)[1] for h in exclude_hosts ]
    def get_active_connections_count(self):
        """Count matching connections by scanning the procfs table."""
        active_connections = 0
        f = open(self.source_file[self.family])
        for tcp_connection in f.readlines():
            tcp_connection = tcp_connection.strip().split(' ')
            # skip the header row
            if tcp_connection[self.local_address_field] == 'local_address':
                continue
            if tcp_connection[self.connection_state_field] not in self.connection_states:
                continue
            (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
            if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]:
                (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
                if remote_ip not in self.exclude_ips:
                    active_connections += 1
        f.close()
        return active_connections
def _convert_host_to_ip(host):
"""
Perform forward DNS resolution on host, IP will give the same IP
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
Tuple containing address family and IP
"""
addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)[0]
return (addrinfo[0], addrinfo[4][0])
def _convert_host_to_hex(host):
    """
    Convert the provided host to the little-endian hex form used by
    /proc/net/tcp (IPv4: one 4-byte word) and /proc/net/tcp6 (IPv6:
    four 4-byte words, each byte-swapped independently).
    Args:
        host: String with either hostname, IPv4, or IPv6 address
    Returns:
        Tuple containing address family and the little-endian converted host
    """
    (family, ip) = _convert_host_to_ip(host)
    hexed = binascii.hexlify(socket.inet_pton(family, ip)).upper()
    if family == socket.AF_INET:
        return (family, _little_endian_convert_32bit(hexed))
    if family == socket.AF_INET6:
        # swap each 8-hex-char (4 byte) word of the 128-bit address
        words = [ _little_endian_convert_32bit(hexed[x:x+8]) for x in xrange(0, 32, 8) ]
        return (family, "".join(words))
    return (family, hexed)
def _little_endian_convert_32bit(block):
    """
    Convert to little-endian, effectively transposing
    the order of the four byte word
    12345678 -> 78563412
    Args:
        block: String containing a 4 byte hex representation
    Returns:
        String containing the little-endian converted block
    """
    # xrange starts at 6, and increments by -2 until it reaches -2
    # which lets us start at the end of the string block and work to the beginning
    return "".join([ block[x:x+2] for x in xrange(6, -2, -2) ])
def main():
module = AnsibleModule(
argument_spec = dict(
host=dict(default='127.0.0.1'),
timeout=dict(default=300),
connect_timeout=dict(default=5),
delay=dict(default=0),
port=dict(default=None),
path=dict(default=None),
search_regex=dict(default=None),
state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']),
exclude_hosts=dict(default=None, type='list')
),
)
params = module.params
host = params['host']
timeout = int(params['timeout'])
connect_timeout = int(params['connect_timeout'])
delay = int(params['delay'])
if params['port']:
port = int(params['port'])
else:
port = None
state = params['state']
path = params['path']
search_regex = params['search_regex']
if params['exclude_hosts']:
exclude_hosts = params['exclude_hosts'].split(',')
else:
exclude_hosts = []
if port and path:
module.fail_json(msg="port and path parameter can not both be passed to wait_for")
if path and state == 'stopped':
module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module")
if path and state == 'drained':
module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module")
if exclude_hosts and state != 'drained':
module.fail_json(msg="exclude_hosts should only be with state=drained")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if state in [ 'stopped', 'absent' ]:
### first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
f = open(path)
f.close()
time.sleep(1)
pass
except IOError:
break
elif port:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(connect_timeout)
try:
s.connect( (host, port) )
s.shutdown(socket.SHUT_RDWR)
s.close()
time.sleep(1)
except:
break
else:
elapsed = datetime.datetime.now() - start
if port:
module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
elif path:
module.fail_json(msg="Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
elif state in ['started', 'present']:
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
os.stat(path)
if search_regex:
try:
f = open(path)
try:
if re.search(search_regex, f.read(), re.MULTILINE):
break
else:
time.sleep(1)
finally:
f.close()
except IOError:
time.sleep(1)
pass
else:
break
except OSError, e:
# File not present
if e.errno == 2:
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
elif port:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(connect_timeout)
try:
s.connect( (host, port) )
if search_regex:
data = ''
matched = False
while 1:
data += s.recv(1024)
if not data:
break
elif re.search(search_regex, data, re.MULTILINE):
matched = True
break
if matched:
s.shutdown(socket.SHUT_RDWR)
s.close()
break
else:
s.shutdown(socket.SHUT_RDWR)
s.close()
break
except:
time.sleep(1)
pass
else:
elapsed = datetime.datetime.now() - start
if port:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
elif path:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
elif state == 'drained':
### wait until all active connections are gone
end = start + datetime.timedelta(seconds=timeout)
tcpconns = TCPConnectionInfo(module)
while datetime.datetime.now() < end:
try:
if tcpconns.get_active_connections_count() == 0:
break
except IOError:
pass
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds)
# import module snippets
from ansible.module_utils.basic import *
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import traceback
import logging
import base64
import threading
import csv
import tempfile
import psycopg2
import openerp.pooler as pooler
from openerp.osv import orm, fields
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class move_line_importer(orm.Model):
    """Asynchronous move / move line importer.

    It parses the saved CSV file using :py:meth:`orm.BaseModel.load`
    in a thread.  If you set ``bypass_orm`` to True then the load
    function will use a totally overridden create function that is a
    lot faster but that totally bypasses the ORM.
    """
    _name = "move.line.importer"
    _inherit = ['mail.thread']

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate an importer record, resetting its state and report."""
        if default is None:
            default = {}
        # A copy must restart from scratch: no stale report, draft state.
        default.update(state='draft', report=False)
        return super(move_line_importer, self).copy(cr, uid, id,
                                                    default=default,
                                                    context=context)

    # NOTE: fixed ``sef`` -> ``self`` typo in the two trackers below;
    # they are called positionally by the mail subsystem so the rename
    # is caller-transparent.
    def track_success(self, cr, uid, obj, context=None):
        """Used by mail subtype: True when the import finished successfully."""
        return obj['state'] == 'done'

    def track_error(self, cr, uid, obj, context=None):
        """Used by mail subtype: True when the import failed."""
        return obj['state'] == 'error'

    _track = {
        'state': {
            'async_move_line_importer.mvl_imported': track_success,
            'async_move_line_importer.mvl_error': track_error,
        },
    }

    _columns = {
        'name': fields.datetime(
            'Name',
            required=True,
            readonly=True
        ),
        'state': fields.selection(
            [('draft', 'New'),
             ('running', 'Running'),
             ('done', 'Success'),
             ('error', 'Error')],
            readonly=True,
            string='Status'
        ),
        'report': fields.text(
            'Report',
            readonly=True
        ),
        'file': fields.binary(
            'File',
            required=True
        ),
        'delimiter': fields.selection(
            [(',', ','), (';', ';'), ('|', '|')],
            string="CSV delimiter",
            required=True
        ),
        'company_id': fields.many2one(
            'res.company',
            'Company'
        ),
        'bypass_orm': fields.boolean(
            'Fast import (use with caution)',
            help="When enabled import will be faster but"
                 " it will not use orm and may"
                 " not support all CSV canvas. \n"
                 "Entry posted option will be skipped. \n"
                 "AA lines will only be created when"
                 " moves are posted. \n"
                 "Tax lines computation will be skipped. \n"
                 "This option should be used with caution"
                 " and in conjonction with provided canvas."
        ),
    }

    def _get_current_company(self, cr, uid, context=None,
                             model="move.line.importer"):
        """Return the default company id for *model* for the current user."""
        return self.pool.get('res.company')._company_default_get(
            cr, uid,
            model,
            context=context
        )

    _defaults = {'state': 'draft',
                 # Pass the callable itself (not its result) so the default
                 # timestamp is computed per record; calling now() here
                 # would freeze the value at server start-up.
                 'name': fields.datetime.now,
                 'company_id': _get_current_company,
                 'delimiter': ',',
                 'bypass_orm': False}

    def _parse_csv(self, cr, uid, imp_id):
        """Parse stored CSV file in order to be usable by BaseModel.load.

        Manages base64 decoding.

        :param imp_id: current importer id
        :returns: (head [list of first row], data [list of list])
        """
        # We use tempfile in order to avoid memory error with large files
        with tempfile.TemporaryFile() as src:
            imp = self.read(cr, uid, imp_id, ['file', 'delimiter'])
            content = imp['file']
            delimiter = imp['delimiter']
            src.write(content)
            with tempfile.TemporaryFile() as decoded:
                src.seek(0)
                # Stream-decode the base64 payload file-to-file.
                base64.decode(src, decoded)
                decoded.seek(0)
                return self._prepare_csv_data(decoded, delimiter)

    def _prepare_csv_data(self, csv_file, delimiter=","):
        """Parse a decoded CSV file and return head list and data list.

        :param csv_file: decoded CSV file
        :param delimiter: CSV file delimiter char
        :returns: (head [list of first row], data [list of list])
        """
        try:
            data = csv.reader(csv_file, delimiter=str(delimiter))
        except csv.Error as error:
            raise orm.except_orm(
                _('CSV file is malformed'),
                _("Maybe you have not choose correct separator \n"
                  "the error detail is : \n %s") % repr(error)
            )
        head = data.next()
        head = [x.replace(' ', '') for x in head]
        # Generator does not work with orm.BaseModel.load
        values = [tuple(x) for x in data if x]
        return (head, values)

    def format_messages(self, messages):
        """Format error messages generated by the BaseModel.load method.

        :param messages: return of BaseModel.load messages key
        :returns: formatted string
        """
        res = []
        for msg in messages:
            rows = msg.get('rows', {})
            res.append(_("%s. -- Field: %s -- rows %s to %s") % (
                msg.get('message', 'N/A'),
                msg.get('field', 'N/A'),
                rows.get('from', 'N/A'),
                rows.get('to', 'N/A'))
            )
        return "\n \n".join(res)

    def _manage_load_results(self, cr, uid, imp_id, result, _do_commit=True,
                             context=None):
        """Manage the BaseModel.load function output and store exception.

        Will generate success/failure report and store it into report field.
        Manage commit and rollback even if load method uses PostgreSQL
        Savepoints.

        :param imp_id: current importer id
        :param result: BaseModel.load returns
                       {ids: list(int)|False, messages: [Message]}
        :param _do_commit: toggle commit management only used
                           for testing purpose only
        :returns: (importer id, state, report message) tuple
        """
        # Import successful
        state = msg = None
        if not result['messages']:
            # Interpolate OUTSIDE of _() so the translation lookup matches
            # the template string (fixed: was formatted inside _()).
            msg = _("%s lines imported") % len(result['ids'] or [])
            state = 'done'
        else:
            if _do_commit:
                cr.rollback()
            msg = self.format_messages(result['messages'])
            state = 'error'
        return (imp_id, state, msg)

    def _write_report(self, cr, uid, imp_id, state, msg, _do_commit=True,
                      max_tries=5, context=None):
        """Commit report in a separated transaction.

        It will avoid concurrent update error due to mail.message.
        If transaction trouble happens we try up to 5 times to rewrite
        the report.

        :param imp_id: current importer id
        :param state: import state
        :param msg: report summary
        :returns: current importer id
        """
        if _do_commit:
            db_name = cr.dbname
            local_cr = pooler.get_db(db_name).cursor()
            try:
                self.write(local_cr, uid, [imp_id],
                           {'state': state, 'report': msg},
                           context=context)
                local_cr.commit()
            # We handle concurrent error troubles
            except psycopg2.OperationalError as pg_exc:
                # Lazy %-args: formatting deferred to the logging framework.
                _logger.error(
                    "Can not write report. "
                    "System will retry %s time(s)", max_tries
                )
                # Fixed: psycopg2 exposes ``pgcode`` (not ``pg_code``);
                # the old attribute raised AttributeError, so the retry
                # branch could never run.
                if (pg_exc.pgcode in orm.PG_CONCURRENCY_ERRORS_TO_RETRY and
                        max_tries >= 0):
                    local_cr.rollback()
                    local_cr.close()
                    remaining_try = max_tries - 1
                    # Fixed: forward the original state/msg on retry; the
                    # previous code passed the cursor as ``state`` and
                    # omitted the required ``msg`` argument (TypeError).
                    self._write_report(cr, uid, imp_id, state, msg,
                                       _do_commit=_do_commit,
                                       max_tries=remaining_try,
                                       context=context)
                else:
                    _logger.exception(
                        'Can not log report - Operational update error'
                    )
                    raise
            except Exception:
                _logger.exception('Can not log report')
                local_cr.rollback()
                raise
            finally:
                if not local_cr.closed:
                    local_cr.close()
        else:
            self.write(cr, uid, [imp_id],
                       {'state': state, 'report': msg},
                       context=context)
        return imp_id

    def _load_data(self, cr, uid, imp_id, head, data, _do_commit=True,
                   context=None):
        """Do the load of the parsed CSV file (thread target).

        It will log exception and success into the report field.

        :param imp_id: current importer id
        :param head: CSV file head (list of header)
        :param data: CSV file content (list of data list)
        :param _do_commit: toggle commit management
                           only used for testing purpose only
        :returns: current importer id
        """
        state = msg = None
        try:
            res = self.pool['account.move'].load(cr, uid, head, data,
                                                 context=context)
            r_id, state, msg = self._manage_load_results(cr, uid, imp_id, res,
                                                         _do_commit=_do_commit,
                                                         context=context)
        except Exception as exc:
            if _do_commit:
                cr.rollback()
            ex_type, sys_exc, tb = sys.exc_info()
            tb_msg = ''.join(traceback.format_tb(tb, 30))
            _logger.error(tb_msg)
            _logger.error(repr(exc))
            # Interpolate outside _() so translation lookup can succeed.
            msg = _("Unexpected exception.\n %s \n %s") % (repr(exc), tb_msg)
            state = 'error'
        finally:
            self._write_report(cr, uid, imp_id, state, msg,
                               _do_commit=_do_commit, context=context)
            if _do_commit:
                try:
                    cr.commit()
                except psycopg2.Error:
                    _logger.exception('Can not do final commit')
                cr.close()
        return imp_id

    def _allows_thread(self, imp_id):
        """Check if there is an async import of this file already running.

        :param imp_id: current importer id
        :returns: void
        :raise: orm.except_orm in case of failure
        """
        for th in threading.enumerate():
            if th.getName() == 'async_move_line_import_%s' % imp_id:
                raise orm.except_orm(
                    _('An import of this file is already running'),
                    # Fixed wrong word in user-facing message
                    # ("latter" -> "later").
                    _('Please try later')
                )

    def _check_permissions(self, cr, uid, context=None):
        """Ensure that user is allowed to create move / move line."""
        move_obj = self.pool['account.move']
        move_line_obj = self.pool['account.move.line']
        move_obj.check_access_rule(cr, uid, [], 'create')
        move_obj.check_access_rights(cr, uid, 'create', raise_exception=True)
        move_line_obj.check_access_rule(cr, uid, [], 'create')
        move_line_obj.check_access_rights(cr, uid, 'create',
                                          raise_exception=True)

    def import_file(self, cr, uid, imp_id, context=None):
        """Do an asynchronous load of a CSV file.

        Will generate a success/failure report and generate some
        mail threads. It uses BaseModel.load to lookup CSV.
        If you set bypass_orm to True then the load function
        will use a totally overridden create function that is a lot faster
        but that totally bypasses the ORM.
        """
        if isinstance(imp_id, list):
            imp_id = imp_id[0]
        if context is None:
            context = {}
        current = self.read(cr, uid, imp_id, ['bypass_orm', 'company_id'],
                            load='_classic_write')
        context['company_id'] = current['company_id']
        bypass_orm = current['bypass_orm']
        if bypass_orm:
            # Tells create function to bypass orm.
            # As we bypass orm we ensure that
            # user is allowed to create move / move line.
            self._check_permissions(cr, uid, context=context)
            context['async_bypass_create'] = True
        head, data = self._parse_csv(cr, uid, imp_id)
        self.write(cr, uid, [imp_id], {'state': 'running',
                                       'report': _('Import is running')})
        self._allows_thread(imp_id)
        # The worker thread needs its own cursor: the request cursor is
        # closed when the RPC call returns.
        db_name = cr.dbname
        local_cr = pooler.get_db(db_name).cursor()
        thread = threading.Thread(target=self._load_data,
                                  name='async_move_line_import_%s' % imp_id,
                                  args=(local_cr, uid, imp_id, head, data),
                                  kwargs={'context': context.copy()})
        thread.start()
        return {}
# resmoke.py suite configuration for the compiled dbtest unit-test binary.
test_kind: db_test
# Empty selector: run every dbtest suite discovered in the binary.
selector: {}
executor:
  config:
    # No extra options passed to the dbtest executable by default.
    dbtest_options:
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from rest_framework import viewsets, permissions, views, generics
from rest_framework.response import Response
from .models import Wallet, Movement
from .serializers import WalletSerializer, MovementSerializer, UserSerializer
class WalletViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for the authenticated user's wallets."""
    model = Wallet
    serializer_class = WalletSerializer

    def get_queryset(self):
        # Only expose wallets belonging to the requesting user.
        return self.request.user.wallets.all()

    def pre_save(self, obj):
        # Stamp ownership on the wallet before it is persisted.
        obj.owner = self.request.user
class MovementViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for movements on wallets owned by the current user."""
    model = Movement
    serializer_class = MovementSerializer

    def get_queryset(self):
        # Follow wallet -> owner so users only ever see their own movements.
        owner = self.request.user
        return Movement.objects.filter(wallet__owner=owner)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only user listing, restricted to admin (staff) accounts."""
    model = get_user_model()
    serializer_class = UserSerializer
    # Only staff users may list/retrieve accounts.
    permission_classes = (permissions.IsAdminUser,)
class MeView(views.APIView):
    """Return the serialized profile of the currently authenticated user."""

    def get(self, request, format=None):
        payload = UserSerializer(request.user).data
        return Response(payload)
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AuditLogEntry.ip_address'
db.add_column('sentry_auditlogentry', 'ip_address',
self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39, null=True),
keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the 'ip_address' column again."""
        # Deleting field 'AuditLogEntry.ip_address'
        db.delete_column('sentry_auditlogentry', 'ip_address')
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry'] | unknown | codeparrot/codeparrot-clean | ||
#include "ruby.h"
/* Expands to a block that forward-declares and calls Init_<n>(klass) for one
 * sub-test file.  Note: the expansion references the local `klass` defined in
 * Init_float below, so that name must not change. */
#define init(n) {void Init_##n(VALUE klass); Init_##n(klass);}
/*
 * Extension entry point: defines the Bug module and the Bug::Float class,
 * then registers every sub-test initialiser.
 * NOTE(review): TEST_INIT_FUNCS is not defined in this file -- presumably
 * generated by the build as a list of init(<name>) invocations; confirm
 * against the extension's extconf/build rules.
 */
void
Init_float(void)
{
    VALUE mBug = rb_define_module("Bug");
    VALUE klass = rb_define_class_under(mBug, "Float", rb_cObject);
    TEST_INIT_FUNCS(init);
}
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test cube indexing, slicing, and extracting, and also the dot graphs.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
from contextlib import contextmanager
import os
import re
import sys
import unittest
import cf_units
import numpy as np
import numpy.ma as ma
import iris
import iris.analysis
import iris.coords
import iris.cube
import iris.fileformats
import iris.fileformats.dot
import iris.tests.pp as pp
import iris.tests.stock
class IrisDotTest(tests.IrisTest):
    """Mixin comparing a cube's DOT-graph rendering against a stored reference."""

    def check_dot(self, cube, reference_filename):
        """Compare *cube*'s DOT text with the reference file, creating the
        reference from the current output if it does not exist yet."""
        actual = iris.fileformats.dot.cube_text(cube)
        expected_path = tests.get_result_path(reference_filename)
        if not os.path.isfile(expected_path):
            # No baseline recorded yet: write the current output as baseline.
            with open(expected_path, 'w') as fh:
                fh.writelines(actual)
        else:
            with open(expected_path, 'r') as fh:
                expected = ''.join(fh.readlines())
            self._assert_str_same(expected, actual, reference_filename,
                                  type_comparison_name='DOT files')
class TestBasicCubeConstruction(tests.IrisTest):
    """Tests adding/removing coordinates on a simple 3 (y) x 4 (x) cube."""

    def setUp(self):
        # Data cube plus candidate dimension (x, y) and auxiliary (xy) coords.
        self.cube = iris.cube.Cube(np.arange(12, dtype=np.int32).reshape((3, 4)), long_name='test cube')
        self.x = iris.coords.DimCoord(np.array([-7.5, 7.5, 22.5, 37.5]), long_name='x')
        self.y = iris.coords.DimCoord(np.array([2.5, 7.5, 12.5]), long_name='y')
        self.xy = iris.coords.AuxCoord(np.arange(12).reshape((3, 4)) * 3.0, long_name='xy')

    def test_add_dim_coord(self):
        """DimCoords must match the dimension length and be given a dimension."""
        # Lengths must match
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(self.y, 1)
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(self.x, 0)
        # Must specify a dimension
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(self.y)
        # Add y
        self.cube.add_dim_coord(self.y, 0)
        self.assertEqual(self.cube.coords(), [self.y])
        self.assertEqual(self.cube.dim_coords, (self.y,))
        # Add x
        self.cube.add_dim_coord(self.x, 1)
        self.assertEqual(self.cube.coords(), [self.y, self.x])
        self.assertEqual(self.cube.dim_coords, (self.y, self.x))
        # Cannot add a coord twice
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(self.y, 0)
        # ... even to cube.aux_coords
        with self.assertRaises(ValueError):
            self.cube.add_aux_coord(self.y, 0)
        # Can't add AuxCoord to dim_coords
        y_other = iris.coords.AuxCoord(np.array([2.5, 7.5, 12.5]), long_name='y_other')
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(y_other, 0)

    def test_add_scalar_coord(self):
        """Scalar coords belong only in aux_coords; dims may be [], (), None, or omitted."""
        scalar_dim_coord = iris.coords.DimCoord(23, long_name='scalar_dim_coord')
        scalar_aux_coord = iris.coords.AuxCoord(23, long_name='scalar_aux_coord')
        # Scalars cannot be in cube.dim_coords
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(scalar_dim_coord)
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(scalar_dim_coord, None)
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(scalar_dim_coord, [])
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(scalar_dim_coord, ())
        # Make sure that's still the case for a 0-dimensional cube.
        cube = iris.cube.Cube(666)
        self.assertEqual(cube.ndim, 0)
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(scalar_dim_coord)
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(scalar_dim_coord, None)
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(scalar_dim_coord, [])
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(scalar_dim_coord, ())
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord)
        cube.add_aux_coord(scalar_aux_coord)
        self.assertEqual(set(cube.aux_coords), {scalar_dim_coord, scalar_aux_coord})
        # Various options for dims
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord, [])
        self.assertEqual(cube.aux_coords, (scalar_dim_coord,))
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord, ())
        self.assertEqual(cube.aux_coords, (scalar_dim_coord,))
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord, None)
        self.assertEqual(cube.aux_coords, (scalar_dim_coord,))
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord)
        self.assertEqual(cube.aux_coords, (scalar_dim_coord,))

    def test_add_aux_coord(self):
        """Both DimCoords and AuxCoords may be added as auxiliary coords."""
        y_another = iris.coords.DimCoord(np.array([2.5, 7.5, 12.5]), long_name='y_another')
        # DimCoords can live in cube.aux_coords
        self.cube.add_aux_coord(y_another, 0)
        self.assertEqual(self.cube.dim_coords, ())
        self.assertEqual(self.cube.coords(), [y_another])
        self.assertEqual(self.cube.aux_coords, (y_another,))
        # AuxCoords in cube.aux_coords
        self.cube.add_aux_coord(self.xy, [0, 1])
        self.assertEqual(self.cube.dim_coords, ())
        self.assertEqual(self.cube.coords(), [y_another, self.xy])
        self.assertEqual(set(self.cube.aux_coords), {y_another, self.xy})
        # Lengths must match up
        cube = self.cube.copy()
        with self.assertRaises(ValueError):
            cube.add_aux_coord(self.xy, [1, 0])

    def test_remove_coord(self):
        """Coords are removable by name, leaving the rest intact."""
        self.cube.add_dim_coord(self.y, 0)
        self.cube.add_dim_coord(self.x, 1)
        self.cube.add_aux_coord(self.xy, (0, 1))
        self.assertEqual(set(self.cube.coords()), {self.y, self.x, self.xy})
        self.cube.remove_coord('xy')
        self.assertEqual(set(self.cube.coords()), {self.y, self.x})
        self.cube.remove_coord('x')
        self.assertEqual(self.cube.coords(), [self.y])
        self.cube.remove_coord('y')
        self.assertEqual(self.cube.coords(), [])

    def test_immutable_dimcoord_dims(self):
        """Mutating the caller's dims list must not affect the cube."""
        # Add DimCoord to dimension 1
        dims = [1]
        self.cube.add_dim_coord(self.x, dims)
        self.assertEqual(self.cube.coord_dims(self.x), (1,))
        # Change dims object
        dims[0] = 0
        # Check the cube is unchanged
        self.assertEqual(self.cube.coord_dims(self.x), (1,))
        # Check coord_dims cannot be changed
        dims = self.cube.coord_dims(self.x)
        with self.assertRaises(TypeError):
            dims[0] = 0

    def test_immutable_auxcoord_dims(self):
        """Same immutability guarantee for multi-dimensional aux coords."""
        # Add AuxCoord to dimensions (0, 1)
        dims = [0, 1]
        self.cube.add_aux_coord(self.xy, dims)
        self.assertEqual(self.cube.coord_dims(self.xy), (0, 1))
        # Change dims object
        dims[0] = 1
        dims[1] = 0
        # Check the cube is unchanged
        self.assertEqual(self.cube.coord_dims(self.xy), (0, 1))
        # Check coord_dims cannot be changed
        dims = self.cube.coord_dims(self.xy)
        with self.assertRaises(TypeError):
            dims[0] = 1
@tests.skip_data
class TestStockCubeStringRepresentations(tests.IrisTest):
    """Check str() and repr() of a realistic 4d cube at every dimensionality."""

    def setUp(self):
        self.cube = iris.tests.stock.realistic_4d()

    def test_4d_str(self):
        self.assertString(str(self.cube))

    def test_4d_repr(self):
        self.assertString(repr(self.cube))

    def test_3d_str(self):
        sub = self.cube[0]
        self.assertString(str(sub))

    def test_3d_repr(self):
        sub = self.cube[0]
        self.assertString(repr(sub))

    def test_2d_str(self):
        sub = self.cube[0, 0]
        self.assertString(str(sub))

    def test_2d_repr(self):
        sub = self.cube[0, 0]
        self.assertString(repr(sub))

    def test_1d_str(self):
        sub = self.cube[0, 0, 0]
        self.assertString(str(sub))

    def test_1d_repr(self):
        sub = self.cube[0, 0, 0]
        self.assertString(repr(sub))

    def test_0d_str(self):
        sub = self.cube[0, 0, 0, 0]
        self.assertString(str(sub))

    def test_0d_repr(self):
        sub = self.cube[0, 0, 0, 0]
        self.assertString(repr(sub))
@tests.skip_data
class TestCubeStringRepresentations(IrisDotTest):
    """DOT, str(), repr() and unicode representations of loaded cubes."""

    def setUp(self):
        path = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
        self.cube_2d = iris.load_cube(path)
        # Generate the unicode cube up here now it's used in two tests.
        unicode_str = six.unichr(40960) + u'abcd' + six.unichr(1972)
        self.unicode_cube = iris.tests.stock.simple_1d()
        self.unicode_cube.attributes['source'] = unicode_str

    def test_dot_simple_pp(self):
        # Test dot output of a 2d cube loaded from pp.
        cube = self.cube_2d
        cube.attributes['my_attribute'] = 'foobar'
        self.check_dot(cube, ('file_load', 'global_pp.dot'))
        pt = cube.coord('time')
        # and with custom coord attributes
        pt.attributes['monty'] = 'python'
        pt.attributes['brain'] = 'hurts'
        self.check_dot(cube, ('file_load', 'coord_attributes.dot'))
        # Undo the changes: self.cube_2d is shared with the other tests.
        del pt.attributes['monty']
        del pt.attributes['brain']
        del cube.attributes['my_attribute']

    # TODO hybrid height and dot output - relationship links
    @tests.skip_data
    def test_dot_4d(self):
        cube = iris.tests.stock.realistic_4d()
        self.check_dot(cube, ('file_load', '4d_pp.dot'))

    @tests.skip_data
    def test_missing_coords(self):
        # Representations must still work with dim coords removed.
        cube = iris.tests.stock.realistic_4d()
        cube.remove_coord('time')
        cube.remove_coord('model_level_number')
        self.assertString(repr(cube),
                          ('cdm', 'str_repr', 'missing_coords_cube.repr.txt'))
        self.assertString(str(cube),
                          ('cdm', 'str_repr', 'missing_coords_cube.str.txt'))

    @tests.skip_data
    def test_cubelist_string(self):
        cube_list = iris.cube.CubeList([iris.tests.stock.realistic_4d(),
                                        iris.tests.stock.global_pp()])
        self.assertString(str(cube_list), ('cdm', 'str_repr', 'cubelist.__str__.txt'))
        self.assertString(repr(cube_list), ('cdm', 'str_repr', 'cubelist.__repr__.txt'))

    def test_basic_0d_cube(self):
        self.assertString(repr(self.cube_2d[0, 0]),
                          ('cdm', 'str_repr', '0d_cube.__repr__.txt'))
        self.assertString(six.text_type(self.cube_2d[0, 0]),
                          ('cdm', 'str_repr', '0d_cube.__unicode__.txt'))
        self.assertString(str(self.cube_2d[0, 0]),
                          ('cdm', 'str_repr', '0d_cube.__str__.txt'))

    def test_similar_coord(self):
        # Two coords with the same name but differing attributes must be
        # distinguishable in the summary.
        cube = self.cube_2d.copy()
        lon = cube.coord('longitude')
        lon.attributes['flight'] = '218BX'
        lon.attributes['sensor_id'] = 808
        lon.attributes['status'] = 2
        lon2 = lon.copy()
        lon2.attributes['sensor_id'] = 810
        lon2.attributes['ref'] = 'A8T-22'
        del lon2.attributes['status']
        cube.add_aux_coord(lon2, [1])
        lat = cube.coord('latitude')
        lat2 = lat.copy()
        lat2.attributes['test'] = 'True'
        cube.add_aux_coord(lat2, [0])
        self.assertString(str(cube), ('cdm', 'str_repr', 'similar.__str__.txt'))

    def test_cube_summary_cell_methods(self):
        cube = self.cube_2d.copy()
        # Create a list of values used to create cell methods
        # (method, coords, intervals, comments) per cell method.
        test_values = ((("mean",), (u'longitude', 'latitude'), (u'6 minutes', '12 minutes'), (u'This is a test comment',)),
                       (("average",), (u'longitude', 'latitude'), (u'6 minutes', '15 minutes'), (u'This is another test comment', 'This is another comment')),
                       (("average",), (u'longitude', 'latitude'), (), ()),
                       (("percentile",), (u'longitude',), (u'6 minutes',), (u'This is another test comment',)))
        for x in test_values:
            # Create a cell method
            cm = iris.coords.CellMethod(method=x[0][0], coords=x[1], intervals=x[2], comments=x[3])
            cube.add_cell_method(cm)
        self.assertString(str(cube), ('cdm', 'str_repr', 'cell_methods.__str__.txt'))

    def test_cube_summary_alignment(self):
        # Test the cube summary dimension alignment and coord name clipping
        cube = iris.tests.stock.simple_1d()
        aux = iris.coords.AuxCoord(
            np.arange(11),
            long_name='This is a really, really, really, really long '
                      'long_name that must be clipped because it is too long')
        cube.add_aux_coord(aux, 0)
        aux = iris.coords.AuxCoord(np.arange(11),
                                   long_name='This is a short long_name')
        cube.add_aux_coord(aux, 0)
        self.assertString(str(cube), ('cdm', 'str_repr', 'simple.__str__.txt'))

    @contextmanager
    def unicode_encoding_change(self, new_encoding):
        # Python 2 only: temporarily change the interpreter's default string
        # encoding.  reload(sys) restores sys.setdefaultencoding, which
        # site.py deletes at startup.
        default_encoding = sys.getdefaultencoding()
        reload(sys).setdefaultencoding(new_encoding)
        yield
        sys.setdefaultencoding(default_encoding)
        # Hide the function again, mirroring site.py's behaviour.
        del sys.setdefaultencoding

    @unittest.skipIf(six.PY3, 'Encodings are sane in Python 3.')
    def test_adjusted_default_encoding(self):
        # Test cube str representation on non-system-default encodings.
        # Doing this requires access to a sys method that is removed by default
        # so reload sys to restore access.
        # Note this does not currently work with utf-16 or utf-32.
        # Run assertions inside 'with' statement to ensure test file is
        # accurately re-created.
        with self.unicode_encoding_change('utf-8'):
            self.assertString(str(self.unicode_cube),
                              ('cdm', 'str_repr',
                               'unicode_attribute.__str__.utf8.txt'))
        with self.unicode_encoding_change('ascii'):
            self.assertString(str(self.unicode_cube),
                              ('cdm', 'str_repr',
                               'unicode_attribute.__str__.ascii.txt'))

    def test_unicode_attribute(self):
        self.assertString(
            six.text_type(self.unicode_cube),
            ('cdm', 'str_repr', 'unicode_attribute.__unicode__.txt'))
@tests.skip_data
class TestValidity(tests.IrisTest):
    """Invalid coordinate additions must raise ValueError."""

    @staticmethod
    def _wobble():
        # Two-point coord used as the invalid candidate in both tests.
        return iris.coords.DimCoord(points=[1, 2], long_name='wobble', units='1')

    def setUp(self):
        path = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
        self.cube_2d = iris.load_cube(path)

    def test_wrong_length_vector_coord(self):
        # Coord length does not match the target dimension's length.
        with self.assertRaises(ValueError):
            self.cube_2d.add_aux_coord(self._wobble(), 0)

    def test_invalid_dimension_vector_coord(self):
        # Dimension index 99 does not exist on a 2d cube.
        with self.assertRaises(ValueError):
            self.cube_2d.add_dim_coord(self._wobble(), 99)
class TestQueryCoord(tests.IrisTest):
    """Tests of Cube.coords() filtering by name, axis, and dimension."""

    def setUp(self):
        self.t = iris.tests.stock.simple_2d_w_multidim_and_scalars()

    def test_name(self):
        coords = self.t.coords('dim1')
        self.assertEqual([coord.name() for coord in coords], ['dim1'])
        coords = self.t.coords('dim2')
        self.assertEqual([coord.name() for coord in coords], ['dim2'])
        coords = self.t.coords('an_other')
        self.assertEqual([coord.name() for coord in coords], ['an_other'])
        coords = self.t.coords('air_temperature')
        self.assertEqual([coord.name() for coord in coords], ['air_temperature'])
        # An unknown name matches nothing.
        coords = self.t.coords('wibble')
        self.assertEqual(coords, [])

    def test_long_name(self):
        # Both standard_name and long_name defined
        coords = self.t.coords(long_name='custom long name')
        # coord.name() returns standard_name if available
        self.assertEqual([coord.name() for coord in coords], ['air_temperature'])

    def test_standard_name(self):
        # Both standard_name and long_name defined
        coords = self.t.coords(standard_name='custom long name')
        self.assertEqual([coord.name() for coord in coords], [])
        coords = self.t.coords(standard_name='air_temperature')
        self.assertEqual([coord.name() for coord in coords], ['air_temperature'])

    def test_var_name(self):
        coords = self.t.coords(var_name='custom_var_name')
        # Matching coord in test cube has a standard_name of 'air_temperature'.
        self.assertEqual([coord.name() for coord in coords], ['air_temperature'])

    def test_axis(self):
        cube = self.t.copy()
        cube.coord("dim1").rename("latitude")
        cube.coord("dim2").rename("longitude")
        coords = cube.coords(axis='y')
        self.assertEqual([coord.name() for coord in coords], ['latitude'])
        coords = cube.coords(axis='x')
        self.assertEqual([coord.name() for coord in coords], ['longitude'])
        # Renaming shouldn't be enough
        cube.coord("an_other").rename("time")
        coords = cube.coords(axis='t')
        self.assertEqual([coord.name() for coord in coords], [])
        # Change units to "hours since ..." as it's the presence of a
        # time unit that identifies a time axis.
        cube.coord("time").units = 'hours since 1970-01-01 00:00:00'
        coords = cube.coords(axis='t')
        self.assertEqual([coord.name() for coord in coords], ['time'])
        coords = cube.coords(axis='z')
        self.assertEqual(coords, [])

    def test_contains_dimension(self):
        coords = self.t.coords(contains_dimension=0)
        self.assertEqual([coord.name() for coord in coords], ['dim1', 'my_multi_dim_coord'])
        coords = self.t.coords(contains_dimension=1)
        self.assertEqual([coord.name() for coord in coords], ['dim2', 'my_multi_dim_coord'])
        coords = self.t.coords(contains_dimension=2)
        self.assertEqual(coords, [])

    def test_dimensions(self):
        coords = self.t.coords(dimensions=0)
        self.assertEqual([coord.name() for coord in coords], ['dim1'])
        coords = self.t.coords(dimensions=1)
        self.assertEqual([coord.name() for coord in coords], ['dim2'])
        # find all coordinates which do not describe a dimension
        coords = self.t.coords(dimensions=[])
        self.assertEqual([coord.name() for coord in coords], ['air_temperature', 'an_other'])
        coords = self.t.coords(dimensions=2)
        self.assertEqual(coords, [])
        coords = self.t.coords(dimensions=[0, 1])
        self.assertEqual([coord.name() for coord in coords], ['my_multi_dim_coord'])

    def test_coord_dim_coords_keyword(self):
        coords = self.t.coords(dim_coords=True)
        self.assertEqual(set([coord.name() for coord in coords]), {'dim1', 'dim2'})
        coords = self.t.coords(dim_coords=False)
        self.assertEqual(set([coord.name() for coord in coords]), {'an_other', 'my_multi_dim_coord', 'air_temperature'})

    def test_coords_empty(self):
        # No filter: all coords are returned.
        coords = self.t.coords()
        self.assertEqual(set([coord.name() for coord in coords]), {'dim1', 'dim2', 'an_other', 'my_multi_dim_coord', 'air_temperature'})

    def test_coord(self):
        coords = self.t.coords(self.t.coord('dim1'))
        self.assertEqual([coord.name() for coord in coords], ['dim1'])
        # check for metadata look-up by modifying points
        coord = self.t.coord('dim1').copy()
        coord.points = np.arange(5) * 1.23
        coords = self.t.coords(coord)
        self.assertEqual([coord.name() for coord in coords], ['dim1'])

    def test_str_repr(self):
        # TODO consolidate with the TestCubeStringRepresentations class
        self.assertString(str(self.t), ('cdm', 'str_repr', 'multi_dim_coord.__str__.txt'))
        self.assertString(repr(self.t), ('cdm', 'str_repr', 'multi_dim_coord.__repr__.txt'))
class TestCube2d(tests.IrisTest):
    """Shared fixture: the stock 2d cube minus its 'air_temperature' scalar coord."""

    def setUp(self):
        cube = iris.tests.stock.simple_2d_w_multidim_and_scalars()
        cube.remove_coord('air_temperature')
        self.t = cube
class Test2dIndexing(TestCube2d):
    """Indexing and slicing the 2d cube, checked against stored CML results."""

    def test_indexing_of_0d_cube(self):
        # A 0d cube cannot be indexed any further.
        c = self.t[0, 0]
        self.assertRaises(IndexError, c.__getitem__, (slice(None, None), ) )

    def test_cube_indexing_0d(self):
        self.assertCML([self.t[0, 0]], ('cube_slice', '2d_to_0d_cube_slice.cml'))

    def test_cube_indexing_1d(self):
        self.assertCML([self.t[0, 0:]], ('cube_slice', '2d_to_1d_cube_slice.cml'))

    def test_cube_indexing_1d_multi_slice(self):
        # Tuple and ndarray index keys must give the same result.
        self.assertCML([self.t[0, (0, 1)]], ('cube_slice', '2d_to_1d_cube_multi_slice.cml'))
        self.assertCML([self.t[0, np.array([0, 1])]], ('cube_slice', '2d_to_1d_cube_multi_slice.cml'))

    def test_cube_indexing_1d_multi_slice2(self):
        self.assertCML([self.t[(0, 2), (0, 1, 3)]], ('cube_slice', '2d_to_1d_cube_multi_slice2.cml'))
        self.assertCML([self.t[np.array([0, 2]), (0, 1, 3)]], ('cube_slice', '2d_to_1d_cube_multi_slice2.cml'))
        self.assertCML([self.t[np.array([0, 2]), np.array([0, 1, 3])]], ('cube_slice', '2d_to_1d_cube_multi_slice2.cml'))

    def test_cube_indexing_1d_multi_slice3(self):
        self.assertCML([self.t[(0, 2), :]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
        self.assertCML([self.t[np.array([0, 2]), :]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))

    def test_cube_indexing_no_change(self):
        self.assertCML([self.t[0:, 0:]], ('cube_slice', '2d_orig.cml'))

    def test_cube_indexing_reverse_coords(self):
        self.assertCML([self.t[::-1, ::-1]], ('cube_slice', '2d_to_2d_revesed.cml'))

    def test_cube_indexing_no_residual_change(self):
        # Indexing must not mutate the original cube.
        self.t[0:3]
        self.assertCML([self.t], ('cube_slice', '2d_orig.cml'))

    def test_overspecified(self):
        # More index keys than dimensions is an error.
        self.assertRaises(IndexError, self.t.__getitem__, (0, 0, Ellipsis, 0))
        self.assertRaises(IndexError, self.t.__getitem__, (0, 0, 0))

    def test_ellipsis(self):
        # Ellipsis stands in for the remaining full slices, wherever placed.
        self.assertCML([self.t[Ellipsis]], ('cube_slice', '2d_orig.cml'))
        self.assertCML([self.t[:, :, :]], ('cube_slice', '2d_orig.cml'))
        self.assertCML([self.t[Ellipsis, Ellipsis]], ('cube_slice', '2d_orig.cml'))
        self.assertCML([self.t[Ellipsis, Ellipsis, Ellipsis]], ('cube_slice', '2d_orig.cml'))
        self.assertCML([self.t[Ellipsis, 0, 0]], ('cube_slice', '2d_to_0d_cube_slice.cml'))
        self.assertCML([self.t[0, Ellipsis, 0]], ('cube_slice', '2d_to_0d_cube_slice.cml'))
        self.assertCML([self.t[0, 0, Ellipsis]], ('cube_slice', '2d_to_0d_cube_slice.cml'))
        self.assertCML([self.t[Ellipsis, (0, 2), :]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
        self.assertCML([self.t[(0, 2), Ellipsis, :]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
        self.assertCML([self.t[(0, 2), :, Ellipsis]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
class TestIteration(TestCube2d):
    """A cube must not be directly iterable."""

    def test_cube_iteration(self):
        # Attempting to iterate the cube raises immediately.
        with self.assertRaises(TypeError):
            for _ in self.t:
                pass
class Test2dSlicing(TestCube2d):
    """Cube.slices() driven by coordinate names."""

    def test_cube_slice_all_dimensions(self):
        # Slicing over every dimension reproduces the original cube.
        for cube in self.t.slices(['dim1', 'dim2']):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))

    def test_cube_slice_with_transpose(self):
        for cube in self.t.slices(['dim2', 'dim1']):
            self.assertCML(cube, ('cube_slice', '2d_transposed.cml'))

    def test_cube_slice_without_transpose(self):
        # ordered=False keeps the cube's own dimension order.
        for cube in self.t.slices(['dim2', 'dim1'], ordered=False):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))

    def test_cube_slice_1dimension(self):
        # Result came from the equivalent test test_cube_indexing_1d which
        # does self.t[0, 0:]
        slices = [res for res in self.t.slices(['dim2'])]
        self.assertCML(slices[0], ('cube_slice', '2d_to_1d_cube_slice.cml'))

    def test_cube_slice_zero_len_slice(self):
        self.assertRaises(IndexError, self.t.__getitem__, (slice(0, 0)))

    def test_cube_slice_with_non_existant_coords(self):
        with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
            self.t.slices(['dim2', 'dim1', 'doesnt exist'])

    def test_cube_extract_coord_with_non_describing_coordinates(self):
        # 'an_other' does not describe a data dimension, so cannot be sliced over.
        with self.assertRaises(ValueError):
            self.t.slices(['an_other'])
class Test2dSlicing_ByDim(TestCube2d):
    """As Test2dSlicing, but identifying dimensions by integer index."""

    def test_cube_slice_all_dimensions(self):
        for cube in self.t.slices([0, 1]):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))

    def test_cube_slice_with_transpose(self):
        for cube in self.t.slices([1, 0]):
            self.assertCML(cube, ('cube_slice', '2d_transposed.cml'))

    def test_cube_slice_without_transpose(self):
        # ordered=False keeps the cube's own dimension order.
        for cube in self.t.slices([1, 0], ordered=False):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))

    def test_cube_slice_1dimension(self):
        # Result came from the equivalent test test_cube_indexing_1d which
        # does self.t[0, 0:]
        slices = [res for res in self.t.slices([1])]
        self.assertCML(slices[0], ('cube_slice', '2d_to_1d_cube_slice.cml'))

    def test_cube_slice_nodimension(self):
        # Slicing over no dimensions yields 0d sub-cubes.
        slices = [res for res in self.t.slices([])]
        self.assertCML(slices[0], ('cube_slice', '2d_to_0d_cube_slice.cml'))

    def test_cube_slice_with_non_existant_dims(self):
        with self.assertRaises(IndexError):
            self.t.slices([1, 0, 2])

    def test_cube_slice_duplicate_dimensions(self):
        with self.assertRaises(ValueError):
            self.t.slices([1, 1])
class Test2dSlicing_ByMix(TestCube2d):
    """Slicing with a mixture of dimension indices and coordinate names."""

    def _check_slices(self, ref_dims, expected_cml):
        # Every sub-cube from slices() must match the stored CML reference.
        for subcube in self.t.slices(ref_dims):
            self.assertCML(subcube, ('cube_slice', expected_cml))

    def test_cube_slice_all_dimensions(self):
        self._check_slices([0, 'dim2'], '2d_orig.cml')

    def test_cube_slice_with_transpose(self):
        self._check_slices(['dim2', 0], '2d_transposed.cml')

    def test_cube_slice_with_non_existant_dims(self):
        # 'an_other' does not describe a dimension -- presumably why this is
        # rejected; compare Test2dSlicing's non-describing-coordinate case.
        with self.assertRaises(ValueError):
            self.t.slices([1, 0, 'an_other'])
class Test2dExtraction(TestCube2d):
    """Constraint-based extraction on the 2d test cube."""

    def test_cube_extract_0d(self):
        # Extract the first value from each of the coords in the cube
        # this result is shared with the self.t[0, 0] test
        self.assertCML([self.t.extract(iris.Constraint(dim1=3.0, dim2=iris.coords.Cell(0, (0, 1))))], ('cube_slice', '2d_to_0d_cube_slice.cml'))

    def test_cube_extract_1d(self):
        # Extract the first value from the second coord in the cube
        # this result is shared with the self.t[0, 0:] test
        self.assertCML([self.t.extract(iris.Constraint(dim1=3.0))], ('cube_slice', '2d_to_1d_cube_slice.cml'))

    def test_cube_extract_2d(self):
        # Do nothing - return the original
        self.assertCML([self.t.extract(iris.Constraint())], ('cube_slice', '2d_orig.cml'))

    def test_cube_extract_coord_which_does_not_exist(self):
        # Unsatisfiable constraints yield None, not an empty cube.
        self.assertEqual(self.t.extract(iris.Constraint(doesnt_exist=8.1)), None)

    def test_cube_extract_coord_with_non_existant_values(self):
        self.assertEqual(self.t.extract(iris.Constraint(dim1=8)), None)
class Test2dExtractionByCoord(TestCube2d):
    """Subsetting a cube with an explicitly constructed coordinate."""

    def test_cube_extract_by_coord_advanced(self):
        # A reversed, partial copy of 'dim2': each point p carries its
        # original bounds (2p, 2p + 1); the value 6 is omitted.
        points = np.array([9, 8, 7, 5, 4, 3, 2, 1, 0], dtype=np.int32)
        bounds = np.array([[2 * p, 2 * p + 1] for p in points], dtype=np.int32)
        target = iris.coords.DimCoord(points, long_name='dim2', units='meters',
                                      bounds=bounds)
        self.assertCML(self.t.subset(target),
                       ('cube_slice', '2d_intersect_and_reverse.cml'))
@tests.skip_data
class TestCubeExtract(tests.IrisTest):
    """Constraint-based extraction from a single loaded cube."""

    def setUp(self):
        path = tests.get_data_path(('PP', 'globClim1', 'theta.pp'))
        self.single_cube = iris.load_cube(path, 'air_potential_temperature')

    def _extract(self, **specs):
        # Build a Constraint from keyword specs and apply it to the cube.
        return self.single_cube.extract(iris.Constraint(**specs))

    def test_simple(self):
        # Exact-value match ...
        self.assertCML(self._extract(latitude=10),
                       ('cdm', 'extract', 'lat_eq_10.cml'))
        # ... and a callable (range) match.
        self.assertCML(self._extract(latitude=lambda cell: cell > 10),
                       ('cdm', 'extract', 'lat_gt_10.cml'))

    def test_combined(self):
        result = self._extract(latitude=lambda cell: cell > 10,
                               longitude=lambda cell: cell >= 10)
        self.assertCML(result, ('cdm', 'extract', 'lat_gt_10_and_lon_ge_10.cml'))

    def test_no_results(self):
        # An unsatisfiable constraint yields None rather than an empty cube.
        self.assertEqual(self._extract(latitude=lambda cell: cell > 1000000),
                         None)
class TestCubeAPI(TestCube2d):
def test_getting_standard_name(self):
self.assertEqual(self.t.name(), 'test 2d dimensional cube')
def test_rename(self):
self.t.rename('foo')
self.assertEqual(self.t.name(), 'foo')
def test_var_name(self):
self.t.var_name = None
self.assertEqual(self.t.var_name, None)
self.t.var_name = 'bar'
self.assertEqual(self.t.var_name, 'bar')
def test_name_and_var_name(self):
# Assign only var_name.
self.t.standard_name = None
self.t.long_name = None
self.t.var_name = 'foo'
# name() should return var_name if standard_name and
# long_name are None.
self.assertEqual(self.t.name(), 'foo')
def test_rename_and_var_name(self):
self.t.var_name = 'bar'
self.t.rename('foo')
# Rename should clear var_name.
self.assertIsNone(self.t.var_name)
def test_setting_invalid_var_name(self):
# Name with whitespace should raise an exception.
with self.assertRaises(ValueError):
self.t.var_name = 'foo bar'
def test_setting_empty_var_name(self):
# Empty string should raise an exception.
with self.assertRaises(ValueError):
self.t.var_name = ''
def test_getting_units(self):
self.assertEqual(self.t.units, cf_units.Unit('meters'))
def test_setting_units(self):
self.assertEqual(self.t.units, cf_units.Unit('meters'))
self.t.units = 'kelvin'
self.assertEqual(self.t.units, cf_units.Unit('kelvin'))
def test_clearing_units(self):
self.t.units = None
self.assertEqual(str(self.t.units), 'unknown')
def test_convert_units(self):
# Set to 'volt'
self.t.units = cf_units.Unit('volt')
data = self.t.data.copy()
# Change to 'kV' - data should be scaled automatically.
self.t.convert_units('kV')
self.assertEqual(str(self.t.units), 'kV')
self.assertArrayAlmostEqual(self.t.data, data / 1000.0)
def test_coords_are_copies(self):
self.assertIsNot(self.t.coord('dim1'), self.t.copy().coord('dim1'))
def test_metadata_nop(self):
self.t.metadata = self.t.metadata
self.assertIsNone(self.t.standard_name)
self.assertEqual(self.t.long_name, 'test 2d dimensional cube')
self.assertIsNone(self.t.var_name)
self.assertEqual(self.t.units, 'meters')
self.assertEqual(self.t.attributes, {})
self.assertEqual(self.t.cell_methods, ())
    def test_metadata_tuple(self):
        """Assigning a 6-tuple should populate every metadata field."""
        metadata = ('air_pressure', 'foo', 'bar', '', {'random': '12'}, ())
        self.t.metadata = metadata
        self.assertEqual(self.t.standard_name, 'air_pressure')
        self.assertEqual(self.t.long_name, 'foo')
        self.assertEqual(self.t.var_name, 'bar')
        self.assertEqual(self.t.units, '')
        self.assertEqual(self.t.attributes, metadata[4])
        # The attributes dict must be copied, not referenced directly.
        self.assertIsNot(self.t.attributes, metadata[4])
        self.assertEqual(self.t.cell_methods, ())
    def test_metadata_dict(self):
        """Assigning a dict with all keys should populate every field."""
        metadata = {'standard_name': 'air_pressure',
                    'long_name': 'foo',
                    'var_name': 'bar',
                    'units': '',
                    'attributes': {'random': '12'},
                    'cell_methods': ()}
        self.t.metadata = metadata
        self.assertEqual(self.t.standard_name, 'air_pressure')
        self.assertEqual(self.t.long_name, 'foo')
        self.assertEqual(self.t.var_name, 'bar')
        self.assertEqual(self.t.units, '')
        self.assertEqual(self.t.attributes, metadata['attributes'])
        # The attributes dict must be copied, not referenced directly.
        self.assertIsNot(self.t.attributes, metadata['attributes'])
        self.assertEqual(self.t.cell_methods, ())
    def test_metadata_attrs(self):
        """Assigning any object carrying the metadata attributes works."""
        class Metadata(object): pass
        metadata = Metadata()
        metadata.standard_name = 'air_pressure'
        metadata.long_name = 'foo'
        metadata.var_name = 'bar'
        metadata.units = ''
        metadata.attributes = {'random': '12'}
        metadata.cell_methods = ()
        metadata.cell_measures_and_dims = []
        self.t.metadata = metadata
        self.assertEqual(self.t.standard_name, 'air_pressure')
        self.assertEqual(self.t.long_name, 'foo')
        self.assertEqual(self.t.var_name, 'bar')
        self.assertEqual(self.t.units, '')
        self.assertEqual(self.t.attributes, metadata.attributes)
        # The attributes dict must be copied, not referenced directly.
        self.assertIsNot(self.t.attributes, metadata.attributes)
        self.assertEqual(self.t.cell_methods, ())
        self.assertEqual(self.t._cell_measures_and_dims, [])
    def test_metadata_fail(self):
        """Incomplete or over-long metadata should raise TypeError."""
        # Tuple with too few elements (5 instead of 6).
        with self.assertRaises(TypeError):
            self.t.metadata = ('air_pressure', 'foo', 'bar', '', {'random': '12'})
        # Tuple with too many elements (9 instead of 6).
        with self.assertRaises(TypeError):
            self.t.metadata = ('air_pressure', 'foo', 'bar', '', {'random': '12'}, (), [], (), ())
        # Dict missing the 'cell_methods' key.
        with self.assertRaises(TypeError):
            self.t.metadata = {'standard_name': 'air_pressure',
                               'long_name': 'foo',
                               'var_name': 'bar',
                               'units': '',
                               'attributes': {'random': '12'}}
        # Object missing the cell_methods attribute.
        with self.assertRaises(TypeError):
            class Metadata(object): pass
            metadata = Metadata()
            metadata.standard_name = 'air_pressure'
            metadata.long_name = 'foo'
            metadata.var_name = 'bar'
            metadata.units = ''
            metadata.attributes = {'random': '12'}
            self.t.metadata = metadata
class TestCubeEquality(TestCube2d):
    """Tests of cube equality (==) and compatibility (is_compatible)."""
    def test_simple_equality(self):
        """A cube should compare equal to its own copy."""
        self.assertEqual(self.t, self.t.copy())
    def test_data_inequality(self):
        """Differing data values should make cubes unequal."""
        self.assertNotEqual(self.t, self.t + 1)
    def test_coords_inequality(self):
        """Removing a coordinate should make cubes unequal."""
        r = self.t.copy()
        r.remove_coord(r.coord('an_other'))
        self.assertNotEqual(self.t, r)
    def test_attributes_inequality(self):
        """Differing attributes should make cubes unequal."""
        r = self.t.copy()
        r.attributes['new_thing'] = None
        self.assertNotEqual(self.t, r)
    def test_array_attributes(self):
        """Array-valued attributes should take part in equality checks."""
        r = self.t.copy()
        r.attributes['things'] = np.arange(3)
        s = r.copy()
        self.assertEqual(s, r)
        s.attributes['things'] = np.arange(2)
        self.assertNotEqual(s, r)
        del s.attributes['things']
        self.assertNotEqual(s, r)
    def test_cell_methods_inequality(self):
        """Differing cell methods should make cubes unequal."""
        r = self.t.copy()
        r.add_cell_method(iris.coords.CellMethod('mean'))
        self.assertNotEqual(self.t, r)
    def test_not_compatible(self):
        """Different units, cell methods or common-attribute values should
        break compatibility."""
        r = self.t.copy()
        self.assertTrue(self.t.is_compatible(r))
        # The following changes should make the cubes incompatible.
        # Different units.
        r.units = 'kelvin'
        self.assertFalse(self.t.is_compatible(r))
        # Different cell_methods.
        r = self.t.copy()
        r.add_cell_method(iris.coords.CellMethod('mean', coords='dim1'))
        self.assertFalse(self.t.is_compatible(r))
        # Different attributes.
        r = self.t.copy()
        self.t.attributes['source']= 'bob'
        r.attributes['source'] = 'alice'
        self.assertFalse(self.t.is_compatible(r))
    def test_compatible(self):
        """Coords, data, var_name and non-common attributes should not
        affect compatibility."""
        r = self.t.copy()
        self.assertTrue(self.t.is_compatible(r))
        # The following changes should not affect compatibility.
        # Different non-common attributes.
        self.t.attributes['source']= 'bob'
        r.attributes['origin'] = 'alice'
        self.assertTrue(self.t.is_compatible(r))
        # Different coordinates.
        r.remove_coord('dim1')
        self.assertTrue(self.t.is_compatible(r))
        # Different data.
        r.data = np.zeros(r.shape)
        self.assertTrue(self.t.is_compatible(r))
        # Different var_names (but equal name()).
        r.var_name = 'foo'
        self.assertTrue(self.t.is_compatible(r))
    def test_is_compatible_ignore(self):
        """The 'ignore' keyword should exclude attributes from the check."""
        r = self.t.copy()
        self.assertTrue(self.t.is_compatible(r))
        # Different histories.
        self.t.attributes['history'] = 'One history.'
        r.attributes['history'] = 'An alternative history.'
        self.assertFalse(self.t.is_compatible(r))
        # Use ignore keyword.
        self.assertTrue(self.t.is_compatible(r, ignore='history'))
        self.assertTrue(self.t.is_compatible(r, ignore=('history',)))
        self.assertTrue(self.t.is_compatible(r, ignore=r.attributes))
    def test_is_compatible_metadata(self):
        """is_compatible() should also accept a metadata object."""
        metadata = self.t.metadata
        self.assertTrue(self.t.is_compatible(metadata))
@tests.skip_data
class TestDataManagerIndexing(TestCube2d):
    """Tests that indexing and slicing preserve deferred (lazy) loading."""
    def setUp(self):
        # Load a real PP cube; its data should initially remain lazy.
        self.cube = iris.load_cube(tests.get_data_path(('PP', 'aPProt1', 'rotatedMHtimecube.pp')))
    def assert_is_lazy(self, cube):
        """Assert the cube's data has not yet been realised."""
        self.assertTrue(cube.has_lazy_data())
    def assert_is_not_lazy(self, cube):
        """Assert the cube's data has been realised."""
        self.assertFalse(cube.has_lazy_data())
    def test_slices(self):
        """Taking a slice should not force loading on either cube."""
        lat_cube = next(self.cube.slices(['grid_latitude', ]))
        self.assert_is_lazy(lat_cube)
        self.assert_is_lazy(self.cube)
    def test_cube_empty_indexing(self):
        """Chained indexing should defer loading until data is accessed,
        and give the same result as indexing already-loaded data."""
        test_filename = ('cube_slice', 'real_empty_data_indexing.cml')
        r = self.cube[:5, ::-1][3]
        rshape = r.shape
        # Make sure we still have deferred data.
        self.assert_is_lazy(r)
        # check the CML of this result
        self.assertCML(r, test_filename)
        # The CML was checked, meaning the data must have been loaded.
        # Check that the cube no longer has deferred data.
        self.assert_is_not_lazy(r)
        r_data = r.data
        # Finally, load the data before indexing and check that it
        # generates the same result.
        c = self.cube
        c.data
        c = c[:5, ::-1][3]
        self.assertCML(c, test_filename)
        self.assertEqual(rshape, c.shape)
        np.testing.assert_array_equal(r_data, c.data)
    def test_real_data_cube_indexing(self):
        """Tuple (fancy) indexing should work on multiple dimensions, and
        over-long index tuples should raise IndexError."""
        cube = self.cube[(0, 4, 5, 2), 0, 0]
        self.assertCML(cube, ('cube_slice', 'real_data_dual_tuple_indexing1.cml'))
        cube = self.cube[0, (0, 4, 5, 2), (3, 5, 5)]
        self.assertCML(cube, ('cube_slice', 'real_data_dual_tuple_indexing2.cml'))
        cube = self.cube[(0, 4, 5, 2), 0, (3, 5, 5)]
        self.assertCML(cube, ('cube_slice', 'real_data_dual_tuple_indexing3.cml'))
        # More indices than the cube has dimensions.
        self.assertRaises(IndexError, self.cube.__getitem__, ((0, 4, 5, 2), (3, 5, 5), 0, 0, 4) )
        self.assertRaises(IndexError, self.cube.__getitem__, (Ellipsis, Ellipsis, Ellipsis, Ellipsis, Ellipsis, Ellipsis) )
    def test_fancy_indexing_bool_array(self):
        """Boolean-array indexing should subset both the data and its mask."""
        cube = self.cube
        cube.data = np.ma.masked_array(cube.data, mask=cube.data > 100000)
        r = cube[:, cube.coord('grid_latitude').points > 1]
        self.assertEqual(r.shape, (10, 218, 720))
        data = cube.data[:, self.cube.coord('grid_latitude').points > 1, :]
        np.testing.assert_array_equal(data, r.data)
        np.testing.assert_array_equal(data.mask, r.data.mask)
class TestCubeCollapsed(tests.IrisTest):
    """Check that collapsing in two stages matches a single-stage collapse."""
    def partial_compare(self, dual, single):
        """Assert dual- and single-stage results agree on coords, name,
        units and shape."""
        result = iris.analysis.coord_comparison(dual, single)
        self.assertEqual(len(result['not_equal']), 0)
        self.assertEqual(dual.name(), single.name(), "dual and single stage standard_names differ")
        self.assertEqual(dual.units, single.units, "dual and single stage units differ")
        self.assertEqual(dual.shape, single.shape, "dual and single stage shape differ")
    def collapse_test_common(self, cube, a_name, b_name, *args, **kwargs):
        """Collapse over a_name then b_name, and over both at once; compare
        each result against reference CML files and against each other."""
        # preserve filenames from before the introduction of "grid_" in rotated coord names.
        a_filename = a_name.replace("grid_", "")
        b_filename = b_name.replace("grid_", "")
        # compare dual and single stage collapsing
        dual_stage = cube.collapsed(a_name, iris.analysis.MEAN)
        dual_stage = dual_stage.collapsed(b_name, iris.analysis.MEAN)
        # np.ma.average doesn't apply type promotion rules in some versions,
        # and instead makes the result type float64. To ignore that case we
        # fix up the dtype here if it is promotable from cube.dtype. We still
        # want to catch cases where there is a loss of precision however.
        if dual_stage.dtype > cube.dtype:
            data = dual_stage.data.astype(cube.dtype)
            dual_stage.data = data
        self.assertCMLApproxData(dual_stage, ('cube_collapsed', '%s_%s_dual_stage.cml' % (a_filename, b_filename)), *args, **kwargs)
        single_stage = cube.collapsed([a_name, b_name], iris.analysis.MEAN)
        if single_stage.dtype > cube.dtype:
            data = single_stage.data.astype(cube.dtype)
            single_stage.data = data
        self.assertCMLApproxData(single_stage, ('cube_collapsed', '%s_%s_single_stage.cml' % (a_filename, b_filename)), *args, **kwargs)
        # Compare the cube bits that should match
        self.partial_compare(dual_stage, single_stage)
    @tests.skip_data
    def test_multi_d(self):
        """Exercise two-stage vs single-stage collapses over coordinate
        pairs, plus collapses over three coordinates at once."""
        cube = iris.tests.stock.realistic_4d()
        # TODO: Re-instate surface_altitude & hybrid-height once we're
        # using the post-CF test results.
        cube.remove_aux_factory(cube.aux_factories[0])
        cube.remove_coord('surface_altitude')
        self.assertCML(cube, ('cube_collapsed', 'original.cml'))
        # Compare 2-stage collapsing with a single stage collapse
        # over 2 Coords.
        self.collapse_test_common(cube, 'grid_latitude', 'grid_longitude',
                                  rtol=1e-05)
        self.collapse_test_common(cube, 'grid_longitude', 'grid_latitude',
                                  rtol=1e-05)
        self.collapse_test_common(cube, 'time', 'grid_latitude', rtol=1e-05)
        self.collapse_test_common(cube, 'grid_latitude', 'time', rtol=1e-05)
        self.collapse_test_common(cube, 'time', 'grid_longitude', rtol=1e-05)
        self.collapse_test_common(cube, 'grid_longitude', 'time', rtol=1e-05)
        self.collapse_test_common(cube, 'grid_latitude', 'model_level_number',
                                  rtol=5e-04)
        self.collapse_test_common(cube, 'model_level_number', 'grid_latitude',
                                  rtol=5e-04)
        self.collapse_test_common(cube, 'grid_longitude', 'model_level_number',
                                  rtol=5e-04)
        self.collapse_test_common(cube, 'model_level_number', 'grid_longitude',
                                  rtol=5e-04)
        self.collapse_test_common(cube, 'time', 'model_level_number',
                                  rtol=5e-04)
        self.collapse_test_common(cube, 'model_level_number', 'time',
                                  rtol=5e-04)
        # NOTE: a second, byte-identical pair of the two
        # ('model_level_number', 'time') calls above was removed here:
        # it repeated the same checks verbatim and only added runtime.
        # Collapse 3 things at once.
        triple_collapse = cube.collapsed(['model_level_number',
                                          'time', 'grid_longitude'],
                                         iris.analysis.MEAN)
        self.assertCMLApproxData(triple_collapse, ('cube_collapsed',
                                                   ('triple_collapse_ml_pt_'
                                                    'lon.cml')),
                                 rtol=5e-04)
        triple_collapse = cube.collapsed(['grid_latitude',
                                          'model_level_number', 'time'],
                                         iris.analysis.MEAN)
        self.assertCMLApproxData(triple_collapse, ('cube_collapsed',
                                                   ('triple_collapse_lat_ml'
                                                    '_pt.cml')),
                                 rtol=0.05)
        # KNOWN PROBLEM: the previous 'rtol' is very large.
        # Numpy 1.10 and 1.11 give significantly different results here.
        # This may relate to known problems with summing over large arrays,
        # which were largely fixed in numpy 1.9 but still occur in some cases,
        # as-of numpy 1.11.
        # Ensure no side effects
        self.assertCML(cube, ('cube_collapsed', 'original.cml'))
@tests.skip_data
class TestTrimAttributes(tests.IrisTest):
    """Check that non-string attribute values still appear in summaries."""
    def test_non_string_attributes(self):
        """An integer-valued attribute must show up in cube.summary()."""
        key = "gorf"
        value = 23
        cube = iris.tests.stock.realistic_4d()
        cube.attributes[key] = value
        # Build the summary, then look for a line mentioning both the
        # attribute name and its value.
        summary = cube.summary()
        pattern = re.compile("%s.*?%s" % (key, value))
        found = any(pattern.match(line.strip())
                    for line in summary.split("\n"))
        if not found:
            self.fail('Attribute not found in summary output of cube.')
@tests.skip_data
class TestMaskedData(tests.IrisTest, pp.PPTest):
    """Tests of loading, slicing, saving and merging masked PP data."""
    def _load_3d_cube(self):
        # This 3D data set has a slice with SOME missing values.
        # The missing data is in the pressure = 1000 hPa, forecast_period = 0,
        # time = 1970-02-11 16:00:00 slice.
        return iris.load_cube(tests.get_data_path(["PP", "mdi_handmade_small", "*.pp"]))
    def test_complete_field(self):
        """A field with no MDIs should load as a plain ndarray."""
        # This pp field has no missing data values
        cube = iris.load_cube(tests.get_data_path(["PP", "mdi_handmade_small", "mdi_test_1000_3.pp"]))
        self.assertIsInstance(cube.data, np.ndarray)
    def test_masked_field(self):
        """A field with MDIs should load as a masked array."""
        # This pp field has some missing data values
        cube = iris.load_cube(tests.get_data_path(["PP", "mdi_handmade_small", "mdi_test_1000_0.pp"]))
        self.assertIsInstance(cube.data, ma.core.MaskedArray)
    def test_missing_file(self):
        """Loading multiple fields, some masked, should give masked data."""
        cube = self._load_3d_cube()
        self.assertIsInstance(cube.data, ma.core.MaskedArray)
        self.assertCML(cube, ('cdm', 'masked_cube.cml'))
    def test_slicing(self):
        """Slices should be plain or masked arrays as appropriate, both
        before and after the deferred data is loaded."""
        cube = self._load_3d_cube()
        # Test the slicing before deferred loading
        full_slice = cube[3]
        partial_slice = cube[0]
        self.assertIsInstance(full_slice.data, np.ndarray)
        self.assertIsInstance(partial_slice.data, ma.core.MaskedArray)
        self.assertEqual(ma.count_masked(partial_slice.data), 25)
        # Test the slicing is consistent after deferred loading
        full_slice = cube[3]
        partial_slice = cube[0]
        self.assertIsInstance(full_slice.data, np.ndarray)
        self.assertIsInstance(partial_slice.data, ma.core.MaskedArray)
        self.assertEqual(ma.count_masked(partial_slice.data), 25)
    def test_save_and_merge(self):
        """Save/reload should preserve dtype; merging should ignore the
        original in-memory fill value."""
        cube = self._load_3d_cube()
        dtype = cube.dtype
        fill_value = 123456
        # extract the 2d field that has SOME missing values
        masked_slice = cube[0]
        masked_slice.data.fill_value = fill_value
        # test saving masked data
        reference_txt_path = tests.get_result_path(('cdm', 'masked_save_pp.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=masked_slice) as temp_pp_path:
            iris.save(masked_slice, temp_pp_path)
        # test merge keeps the mdi we just saved
        cube1 = iris.load_cube(temp_pp_path)
        self.assertEqual(cube1.dtype, dtype)
        cube2 = cube1.copy()
        # make cube1 and cube2 differ on a scalar coord, to make them mergeable into a 3d cube
        cube2.coord("pressure").points = [1001.0]
        merged_cubes = iris.cube.CubeList([cube1, cube2]).merge()
        self.assertEqual(len(merged_cubes), 1, "expected a single merged cube")
        merged_cube = merged_cubes[0]
        self.assertEqual(merged_cube.dtype, dtype)
        # Check that the original masked-array fill-value is *ignored*.
        self.assertArrayAllClose(merged_cube.data.fill_value, -1e30)
@tests.skip_data
class TestConversionToCoordList(tests.IrisTest):
    """Tests of Cube._as_list_of_coords argument normalisation."""
    def test_coord_conversion(self):
        """Strings, coords and mixed lists convert; invalid inputs raise."""
        cube = iris.tests.stock.realistic_4d()
        # Single string
        self.assertEqual(len(cube._as_list_of_coords('grid_longitude')), 1)
        # List of string and unicode
        self.assertEqual(len(cube._as_list_of_coords(['grid_longitude',
                                                      u'grid_latitude'], )), 2)
        # Coord object(s)
        lat = cube.coords("grid_latitude")[0]
        lon = cube.coords("grid_longitude")[0]
        self.assertEqual(len(cube._as_list_of_coords(lat)), 1)
        self.assertEqual(len(cube._as_list_of_coords([lat, lon])), 2)
        # Mix of string-like and coord
        self.assertEqual(len(cube._as_list_of_coords(['grid_latitude', lon])),
                         2)
        # Empty list
        self.assertEqual(len(cube._as_list_of_coords([])), 0)
        # Invalid coords
        invalid_choices = [iris.analysis.MEAN,  # Caused by mixing up argument order in call to cube.collapsed for example
                           None,
                           ['grid_latitude', None],
                           [lat, None],
                           ]
        for coords in invalid_choices:
            with self.assertRaises(TypeError):
                cube._as_list_of_coords(coords)
if __name__ == "__main__":
    # Allow this test module to be run directly.
    tests.main()
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v1beta1",
"metadata": {
"name": "v40.refresh_numeric.v42"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"liveNow": false,
"preload": false,
"refresh": "",
"schemaVersion": 42,
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "Numeric Refresh Test Dashboard"
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v2beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dashboards_from_v0_to_v2/v2beta1.v40.refresh_numeric.v1beta1.json |
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
def default_parser(self, encoding):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
# Use the default parser.
parser = self.default_parser(encoding)
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False, encoding=encoding)
return parser
def __init__(self, parser=None, empty_element_tags=None):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:yield: A series of 4-tuples.
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for parsing the document.
"""
if isinstance(markup, unicode):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
yield markup, None, document_declared_encoding, False
if isinstance(markup, unicode):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# Instead of using UnicodeDammit to convert the bytestring to
# Unicode using different encodings, use EncodingDetector to
# iterate over the encodings, and tell lxml to try to parse
# the document as each one in turn.
is_html = not self.is_xml
try_encodings = [user_specified_encoding, document_declared_encoding]
detector = EncodingDetector(markup, try_encodings, is_html)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, unicode):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
raise ParserRejectedMarkup(str(e))
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn then into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
pass
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """Variant of the lxml builder for (non-XML) HTML documents."""
    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False
    def default_parser(self, encoding):
        # Return the class; parser_for() instantiates it as needed.
        return etree.HTMLParser
    def feed(self, markup):
        """Parse the markup in one go (no chunking, unlike the XML builder)."""
        encoding = self.soup.original_encoding
        try:
            self.parser = self.parser_for(encoding)
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
import json;
import copy;
info = {
'name' : "Embedded PI / COOCOX",
'link' : [ "http://coocox.org" ],
'variables' : 715,
'binary_name' : 'espruino_%v_embedded_pi.bin',
};
chip = {
'part' : "STM32F103RB", #T6
'family' : "STM32F1",
'package' : "LQFP64",
'ram' : 20,
'flash' : 128,
'speed' : 72,
'usart' : 3,
'spi' : 2,
'i2c' : 2,
'adc' : 3,
'dac' : 0,
};
# left-right, or top-bottom order
board = {
'top' : [ 'D15','D14', 'AREF','GND', 'D13', 'D12', 'D11','D10', 'D9', 'D8', '', 'D7', 'D6', 'D5', 'D4', 'D3', 'D2', 'D1', 'D0'],
'top2' : [ 'D39','D38','D37', 'D36', 'D35', 'D34', 'D33','D32', 'D31', 'D30', '', 'D29', 'D28', 'D27', 'D26', 'D25', 'D24', 'D23', 'D22'],
'bottom' : [ 'NC','DVCC','RST', '3.3', '5', 'GNDA', 'GND', 'VIN', '', 'D16', 'D17', 'D18', 'D19', 'D14', 'D15'],
'bottom2' : [ 'BOOT0','BOOT1','RST', '3.3', 'NC', 'GND', 'D26', 'D28', '', 'D40', 'D41', 'D42', 'D43', 'D44', 'D45'],
'right2' : [ 'NC','D13','D12' ],
'right' : [ 'GND','D11','NC' ],
};
board["right"].reverse()
board["right2"].reverse()
devices = {
# 'OSC' : { 'pin_1' : 'D0',
# 'pin_2' : 'D1' },
'OSC_RTC' : { 'pin_1' : 'D22',
'pin_2' : 'D23' },
'LED1' : { 'pin' : 'D13' },
# 'LED2' : { 'pin' : 'D3' },
'BTN1' : { 'pin' : 'D38' }, # 'C9'
'USB' : { 'pin_disc' : 'D39',
'pin_dm' : 'D40',
'pin_bp' : 'D41'
},
# 'SD' : { 'pin_cs' : 'D25',#'D2',
# 'pin_di' : 'D34',#'B15',
# 'pin_do' : 'D33',#'B14',
# 'pin_clk' : 'D32'}, #'B13'
};
board_css = """
#board {
width: 1052px;
height: 506px;
top: 300px;
left: 200px;
background-image: url(img/EMBEDDED_PI.jpg);
}
#boardcontainer {
height: 850px;
}
#top {
top: -20px;
left: 540px;
}
#top2 {
top: 60px;
left: 540px;
}
#bottom {
top: 500px;
left: 650px;
}
#bottom2 {
top: 420px;
left: 650px;
}
#left {
top: 155px;
right: 520px;
}
#left2 {
top:155px;
left: 20px;
}
#right {
top: 200px;
left: 1330px;
}
#right2 {
top: 200px;
right: -270px;
}
""";
def get_pins():
    """Build the board's pin list.

    Reads the STM32F103xB pin definitions and renames each used pin to
    the Embedded Pi's Arduino-style 'Dxx' numbering, returning the pins
    sorted by that numbering.
    """
    pins = pinutils.scan_pin_file([], 'stm32f103xb.csv', 6, 10, 11)
    # Embedded Pi Mapping
    pinmapping = {
      'D0' :'PC11',
      'D1' :'PC10',
      'D2' :'PC12',
      'D3' :'PC6',
      'D4' :'PC7',
      'D5' :'PC8',
      'D6' :'PC9',
      'D7' :'PD2',
      'D8' :'PA15',
      'D9' :'PA8',
      'D10':'PB12',
      'D11':'PB15',
      'D12':'PB14',
      'D13':'PB13',
      'D14':'PB7',
      'D15':'PB6',
      'D16':'PC0', # share with D40
      'D17':'PC1', # share with D41
      'D18':'PC2', # share with D42
      'D19':'PC3', # share with D43
      'D20':'PB7', # share with D14
      'D21':'PB6', # share with D15
      'D22':'PA3',
      'D23':'PA2',
      'D24':'PA1',
      'D25':'PA0',
      'D26':'PA9',
      'D27':'PB0',
      'D28':'PA10',
      'D29':'PB1',
      'D30':'PB8',
      'D31':'PB9',
      'D32':'PA4',
      'D33':'PA7',
      'D34':'PA6',
      'D35':'PA5',
      'D36':'PC13',
      'D37':'PB5',
      'D38':'PB11',
      'D39':'PB10',
      'D40':'PC0',
      'D41':'PC1',
      'D42':'PC2',
      'D43':'PC3',
      'D44':'PC4',
      'D45':'PC5',
    };
    newpins = []
    for newname in pinmapping:
        print newname+" => "+pinmapping[newname]
        # Deep-copy so chip pins shared by two D-names (e.g. D14/D20)
        # stay independent entries.
        pin = copy.deepcopy(pinutils.findpin(pins, pinmapping[newname], True))
        pin["name"] = "P"+newname
        # e.g. 'D7' -> 'D07' so a lexical sort matches numeric order.
        pin["sortingname"] = newname[0] + newname[1:].rjust(2,'0')
        newpins.append(pin)
    # Because 'pinmapping' is NOT stored in order!!!
    newpins = sorted(newpins, key=lambda pin: pin["sortingname"])
    # print(json.dumps(newpins, sort_keys=True, indent=2))
    return newpins
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
__all__ = ["ScalarInt", "BinaryInt", "OctalInt", "HexInt", "HexCapsInt"]
from .compat import no_limit_int # NOQA
class ScalarInt(no_limit_int):
    """Integer subclass that remembers presentation details (field width
    and underscore placement) so values round-trip through YAML output."""

    def __new__(cls, *args, **kw):
        # type: (Any, Any, Any) -> Any
        fmt_width = kw.pop('width', None)  # type: ignore
        fmt_underscore = kw.pop('underscore', None)  # type: ignore
        value = no_limit_int.__new__(cls, *args, **kw)  # type: ignore
        value._width = fmt_width
        value._underscore = fmt_underscore
        return value

    def _copy_formatting_to(self, raw):
        # type: (Any) -> Any
        # Wrap a plain integer result in our own type, carrying over the
        # stored width/underscore presentation info.
        wrapped = type(self)(raw)
        wrapped._width = self._width  # type: ignore
        underscore = self._underscore  # type: ignore
        wrapped._underscore = None if underscore is None else underscore[:]
        return wrapped

    def __iadd__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_formatting_to(self + a)

    def __ifloordiv__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_formatting_to(self // a)

    def __imul__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_formatting_to(self * a)

    def __ipow__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_formatting_to(self ** a)

    def __isub__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_formatting_to(self - a)
class BinaryInt(ScalarInt):
    """Int loaded from / dumped as a binary (0b...) literal."""
    def __new__(cls, value, width=None, underscore=None):
        # type: (Any, Any, Any) -> Any
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore)
class OctalInt(ScalarInt):
    """Int loaded from / dumped as an octal literal."""
    def __new__(cls, value, width=None, underscore=None):
        # type: (Any, Any, Any) -> Any
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore)
# mixed casing of A-F is not supported, when loading the first non digit
# determines the case
class HexInt(ScalarInt):
    """uses lower case (a-f)"""
    def __new__(cls, value, width=None, underscore=None):
        # type: (Any, Any, Any) -> Any
        # Delegate to ScalarInt to record the presentation details.
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore)
class HexCapsInt(ScalarInt):
    """uses upper case (A-F)"""
    def __new__(cls, value, width=None, underscore=None):
        # type: (Any, Any, Any) -> Any
        # Delegate to ScalarInt to record the presentation details.
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore)
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.integration.observability.tasksscheduled;
import io.micrometer.observation.ObservationRegistry;
import org.springframework.scheduling.annotation.SchedulingConfigurer;
import org.springframework.scheduling.config.ScheduledTaskRegistrar;
/**
 * {@link SchedulingConfigurer} that registers a Micrometer
 * {@link ObservationRegistry} with the {@link ScheduledTaskRegistrar},
 * so that scheduled task executions can be observed.
 */
public class ObservationSchedulingConfigurer implements SchedulingConfigurer {

	private final ObservationRegistry observationRegistry;

	public ObservationSchedulingConfigurer(ObservationRegistry observationRegistry) {
		this.observationRegistry = observationRegistry;
	}

	@Override
	public void configureTasks(ScheduledTaskRegistrar taskRegistrar) {
		taskRegistrar.setObservationRegistry(this.observationRegistry);
	}

}
import logging, threading
from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils
import kvm_utils, kvm_test_utils
def run_nic_promisc(test, params, env):
"""
Test nic driver in promisc mode:
1) Boot up a VM.
2) Repeatedly enable/disable promiscuous mode in guest.
3) TCP data transmission from host to guest, and from guest to host,
with 1/1460/65000/100000000 bytes payloads.
4) Clean temporary files.
5) Stop enable/disable promiscuous mode change.
@param test: KVM test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
session_serial = vm.wait_for_serial_login(timeout=timeout)
def compare(filename):
md5_host = utils.hash_file(filename, method="md5")
md5_guest = session.cmd("md5sum %s" % filename)
md5_guest = md5_guest.split()[0]
if md5_host != md5_guest:
logging.error("MD5 hash mismatch between file %s "
"present on guest and on host", filename)
logging.error("MD5 hash for file on guest: %s,"
"MD5 hash for file on host: %s", md5_host, md5_guest)
return False
return True
ethname = kvm_test_utils.get_linux_ifname(session, vm.get_mac_address(0))
class ThreadPromiscCmd(threading.Thread):
def __init__(self, session, termination_event):
self.session = session
self.termination_event = termination_event
super(ThreadPromiscCmd, self).__init__()
def run(self):
set_promisc_cmd = ("ip link set %s promisc on; sleep 0.01;"
"ip link set %s promisc off; sleep 0.01" %
(ethname, ethname))
while True:
self.session.cmd_output(set_promisc_cmd)
if self.termination_event.isSet():
break
logging.info("Started thread to change promisc mode in guest")
termination_event = threading.Event()
promisc_thread = ThreadPromiscCmd(session_serial, termination_event)
promisc_thread.start()
dd_cmd = "dd if=/dev/urandom of=%s bs=%d count=1"
filename = "/tmp/nic_promisc_file"
file_size = params.get("file_size", "1, 1460, 65000, 100000000").split(",")
success_counter = 0
try:
for size in file_size:
logging.info("Create %s bytes file on host", size)
utils.run(dd_cmd % (filename, int(size)))
logging.info("Transfer file from host to guest")
try:
vm.copy_files_to(filename, filename)
except kvm_utils.SCPError, e:
logging.error("File transfer failed (%s)", e)
continue
if not compare(filename):
logging.error("Compare file failed")
continue
else:
success_counter += 1
logging.info("Create %s bytes file on guest", size)
session.cmd(dd_cmd % (filename, int(size)), timeout=100)
logging.info("Transfer file from guest to host")
try:
vm.copy_files_from(filename, filename)
except kvm_utils.SCPError, e:
logging.error("File transfer failed (%s)", e)
continue
if not compare(filename):
logging.error("Compare file failed")
continue
else:
success_counter += 1
logging.info("Clean temporary files")
cmd = "rm -f %s" % filename
utils.run(cmd)
session.cmd_output(cmd)
finally:
logging.info("Stopping the promisc thread")
termination_event.set()
promisc_thread.join(10)
logging.info("Restore the %s to the nonpromisc mode", ethname)
session.cmd_output("ip link set %s promisc off" % ethname)
session.close()
if success_counter != 2 * len(file_size):
raise error.TestFail("Some tests failed, succss_ratio : %s/%s" %
(success_counter, len(file_size))) | unknown | codeparrot/codeparrot-clean | ||
""" Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to normalized encoding
names as defined in the normalize_encoding() function below, e.g.
'utf-8' must be implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> codecs.CodecInfo object
The getregentry() API must a CodecInfo object with encoder, decoder,
incrementalencoder, incrementaldecoder, streamwriter and streamreader
atttributes which adhere to the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be normalized encoding
names as defined by normalize_encoding().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
from encodings import aliases
import __builtin__
# Cache mapping encoding name -> codecs.CodecInfo; a cached None records a
# failed lookup so it is not retried.
_cache = {}
# Sentinel distinguishing "not cached yet" from a cached None (failure).
_unknown = '--unknown--'
# fromlist argument for __import__() so the submodule itself is returned.
_import_tail = ['*']
_norm_encoding_map = (' . '
'0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ '
' abcdefghijklmnopqrstuvwxyz '
' '
' '
' ')
# Local reference to the alias table; search_function() also records the
# aliases exported by codec modules into it.
_aliases = aliases.aliases
class CodecRegistryError(LookupError, SystemError):
    """Raised when a codec module's registry entry is malformed."""
    pass
def normalize_encoding(encoding):
    """ Normalize an encoding name.

        Normalization works as follows: all non-alphanumeric
        characters except the dot used for Python package names are
        collapsed and replaced with a single underscore, e.g. ' -;#'
        becomes '_'. Leading and trailing underscores are removed.

        Note that encoding names should be ASCII only; if they do use
        non-ASCII characters, these must be Latin-1 compatible.

    """
    # Make sure we have an 8-bit string, because .translate() works
    # differently for Unicode strings.
    if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode):
        # Note that .encode('latin-1') does *not* use the codec
        # registry, so this call doesn't recurse. (See unicodeobject.c
        # PyUnicode_AsEncodedString() for details)
        encoding = encoding.encode('latin-1')
    # _norm_encoding_map maps every byte outside [A-Za-z0-9.] to a space;
    # split()/join() then collapses runs of spaces into single underscores
    # and strips them from both ends.
    return '_'.join(encoding.translate(_norm_encoding_map).split())
def search_function(encoding):
    """Codec search function registered with codecs.register().

    Resolves an encoding name to a codecs.CodecInfo by importing the
    matching submodule of this package.  Results — including failures,
    which are stored as None — are memoized in _cache.
    """
    # Cache lookup
    entry = _cache.get(encoding, _unknown)
    if entry is not _unknown:
        return entry

    # Import the module:
    #
    # First try to find an alias for the normalized encoding
    # name and lookup the module using the aliased name, then try to
    # lookup the module using the standard import scheme, i.e. first
    # try in the encodings package, then at top-level.
    #
    norm_encoding = normalize_encoding(encoding)
    aliased_encoding = _aliases.get(norm_encoding) or \
                       _aliases.get(norm_encoding.replace('.', '_'))
    if aliased_encoding is not None:
        modnames = [aliased_encoding,
                    norm_encoding]
    else:
        modnames = [norm_encoding]
    for modname in modnames:
        # Dotted names could escape the package, so skip them.
        if not modname or '.' in modname:
            continue
        try:
            # Import is absolute to prevent the possibly malicious import of a
            # module with side-effects that is not in the 'encodings' package.
            mod = __import__('encodings.' + modname, fromlist=_import_tail,
                             level=0)
        except ImportError:
            pass
        else:
            break
    else:
        mod = None

    try:
        getregentry = mod.getregentry
    except AttributeError:
        # Not a codec module
        mod = None

    if mod is None:
        # Cache misses
        _cache[encoding] = None
        return None

    # Now ask the module for the registry entry
    entry = getregentry()
    if not isinstance(entry, codecs.CodecInfo):
        # Legacy modules return a plain tuple of 4 to 7 items; validate the
        # callables, pad to 7 entries and wrap it in a CodecInfo below.
        if not 4 <= len(entry) <= 7:
            raise CodecRegistryError,\
                'module "%s" (%s) failed to register' % \
                (mod.__name__, mod.__file__)
        if not hasattr(entry[0], '__call__') or \
           not hasattr(entry[1], '__call__') or \
           (entry[2] is not None and not hasattr(entry[2], '__call__')) or \
           (entry[3] is not None and not hasattr(entry[3], '__call__')) or \
           (len(entry) > 4 and entry[4] is not None and not hasattr(entry[4], '__call__')) or \
           (len(entry) > 5 and entry[5] is not None and not hasattr(entry[5], '__call__')):
            raise CodecRegistryError,\
                'incompatible codecs in module "%s" (%s)' % \
                (mod.__name__, mod.__file__)
        if len(entry)<7 or entry[6] is None:
            # Default the codec name to the submodule name.
            entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
        entry = codecs.CodecInfo(*entry)

    # Cache the codec registry entry
    _cache[encoding] = entry

    # Register its aliases (without overwriting previously registered
    # aliases)
    try:
        codecaliases = mod.getaliases()
    except AttributeError:
        pass
    else:
        for alias in codecaliases:
            if alias not in _aliases:
                _aliases[alias] = modname

    # Return the registry entry
    return entry
# Register the search_function in the Python codec registry so that
# codecs.lookup() and str/unicode encode()/decode() can find the codec
# modules shipped in this package.
codecs.register(search_function)
---
# Minimal play for the plugin-filtering integration test: run the `stat`
# module against the filesystem root on the test host, with fact
# gathering disabled.
- hosts: testhost
  gather_facts: False
  tasks:
    - stat:
        path: '/'
"""Unit tests for the selected_tests script."""
import unittest
from buildscripts import errorcodes
# Debugging
# Makes errorcodes.py log the files it scans while these tests run.
errorcodes.list_files = True

# Fixture directory containing small source trees that exercise the
# error-code parser.
TESTDATA_DIR = "./buildscripts/tests/data/errorcodes/"
class TestErrorcodes(unittest.TestCase):
    """Test errorcodes.py."""

    def setUp(self):
        # errorcodes.py keeps some global state; reset the accumulated code
        # list so tests do not leak into each other.
        errorcodes.codes = []

    def test_regex_matching(self):
        """Test regex matching."""
        captured_error_codes = []

        def accumulate_files(code):
            captured_error_codes.append(code)

        # The fixture tree contains assertion sites the parser's regexes
        # are expected to find — 32 in total.
        errorcodes.parse_source_files(accumulate_files, TESTDATA_DIR + "regex_matching/")
        self.assertEqual(32, len(captured_error_codes))

    def test_dup_checking(self):
        """Test dup checking."""
        assertions, errors, _ = errorcodes.read_error_codes(TESTDATA_DIR + "dup_checking/")
        # `assertions` is every use of an error code. Duplicates are included.
        self.assertEqual(4, len(assertions))
        self.assertEqual([1, 2, 3, 2], list(map(lambda x: int(x.code), assertions)))
        # All assertions with the same error code are considered `errors`.
        self.assertEqual(2, len(errors))
        self.assertEqual(2, int(errors[0].code))
        self.assertEqual(2, int(errors[1].code))

    def test_generate_next_code(self):
        """Test `get_next_code`."""
        _, _, seen = errorcodes.read_error_codes(TESTDATA_DIR + "generate_next_code/")
        # The fixture presumably tops out at code 20, so the generator
        # yields consecutive codes from 21 — TODO confirm against fixture.
        generator = errorcodes.get_next_code(seen)
        self.assertEqual(21, next(generator))
        self.assertEqual(22, next(generator))

    def test_generate_next_server_code(self):
        """
        Test `generate_next_server_code`.

        This call to `read_error_codes` technically has no bearing on `get_next_code` when a
        `server_ticket` is passed in. But it maybe makes sense for the test to do so in case a
        future patch changes that relationship.
        """
        _, _, seen = errorcodes.read_error_codes(TESTDATA_DIR + "generate_next_server_code/")
        print("Seen: " + str(seen))
        # Ticket-scoped codes look like <ticket><2-digit counter>; the gap
        # (no 1230102) suggests that code is already taken in the fixture.
        generator = errorcodes.get_next_code(seen, server_ticket=12301)
        self.assertEqual(1230101, next(generator))
        self.assertEqual(1230103, next(generator))

    def test_ticket_coersion(self):
        """Test `coerce_to_number`.

        NOTE(review): method name keeps its historical "coersion" spelling;
        renaming would break selection of this test by name.
        """
        self.assertEqual(0, errorcodes.coerce_to_number(0))
        self.assertEqual(1234, errorcodes.coerce_to_number("1234"))
        self.assertEqual(1234, errorcodes.coerce_to_number("server-1234"))
        self.assertEqual(1234, errorcodes.coerce_to_number("SERVER-1234"))
        self.assertEqual(-1, errorcodes.coerce_to_number("not a ticket"))
# Owner(s): ["NNC"]
# ruff: noqa: F841
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
class BaseTestClass(JitTestCase):
    """Common fixture for the tensor-expression fuser tests.

    Enables the tensor-expression fuser options for the duration of each
    test and records which devices/dtypes the current build can exercise.
    """

    def setUp(self):
        super().setUp()
        # TensorExprTestOptions flips the JIT/fuser flags; they are put
        # back in tearDown() via restore().
        self.tensorexpr_options = TensorExprTestOptions()
        self.devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
        # bfloat16 coverage is only exercised when the LLVM backend is built in.
        self.dtypes = [torch.float32, torch.bfloat16] if LLVM_ENABLED else [torch.float32]

    def tearDown(self):
        self.tensorexpr_options.restore()
        super().tearDown()

    def assertLastGraphAllFused(self):
        # Assert the most recently executed optimized graph was fully fused.
        self.assertAllFused(torch.jit.last_executed_optimized_graph())
def warmup_and_run_forward(f, *args):
    """Run *f* enough times to trigger profile-guided JIT optimization.

    The callable is invoked once more than the configured number of
    profiled runs, so the final invocation executes the optimized graph;
    that last result is returned.
    """
    total_runs = torch._C._jit_get_num_profiled_runs() + 1
    result = None
    for _ in range(total_runs):
        result = f(*args)
    return result
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
def test_easy(self):
def easy(x, y):
aaa = torch.add(x, y)
return aaa
traced = torch.jit.trace(easy, (torch.rand(1024), torch.rand(1024)))
a = torch.rand(1024)
b = torch.rand(1024)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose(a.numpy() + b.numpy(), x.numpy())
    def test_three_arg(self):
        # Two chained adds over three 1-D inputs; both should fuse into
        # one kernel.
        def easy(x, y, z):
            aaa = torch.add(x, y)
            bbb = torch.add(aaa, z)
            return bbb

        traced = torch.jit.trace(
            easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))
        )

        a = torch.rand(1024)
        b = torch.rand(1024)
        c = torch.rand(1024)
        x = warmup_and_run_forward(traced, a, b, c)
        self.assertLastGraphAllFused()
        npr = a.numpy() + b.numpy() + c.numpy()
        np.testing.assert_allclose(npr, x.numpy())
def test_four_arg(self):
def run_addcmul(x, y, z, w):
c = torch.addcmul(torch.add(x, y), z, w)
return c
for dev in self.devices:
rand_a = torch.rand(1024, dtype=torch.float, device=dev)
rand_b = torch.rand(1024, dtype=torch.float, device=dev)
rand_c = torch.rand(1024, dtype=torch.float, device=dev)
rand_d = torch.rand(1024, dtype=torch.float, device=dev)
traced = torch.jit.trace(
run_addcmul,
(
torch.zeros(1024, dtype=torch.float, device=dev),
torch.zeros(1024, dtype=torch.float, device=dev),
torch.zeros(1024, dtype=torch.float, device=dev),
torch.zeros(1024, dtype=torch.float, device=dev),
),
)
x = warmup_and_run_forward(traced, rand_a, rand_b, rand_c, rand_d)
self.assertLastGraphAllFused()
y = run_addcmul(rand_a, rand_b, rand_c, rand_d)
np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy(), atol=1e-6)
def test_three_arg2(self):
for device in self.devices:
def test(x, y, z):
aaa = torch.add(x, y)
bbb = torch.add(aaa, z)
return bbb
M = 32
N = 32
traced = torch.jit.trace(
test,
(
torch.rand(M, N, device=device),
torch.rand(M, N, device=device),
torch.rand(M, N, device=device),
),
)
a = torch.rand(M, N, device=device)
b = torch.rand(M, N, device=device)
c = torch.rand(M, N, device=device)
x = traced(a, b, c)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = a.cpu().numpy() + b.cpu().numpy() + c.cpu().numpy()
np.testing.assert_allclose(npr, x.cpu().numpy())
def test_broadcast3(self):
for device in self.devices:
def test_body(M, N, L, K):
def test(x, y, z):
v1 = torch.add(x, y)
v2 = torch.add(v1, z)
return v2
a_shape = [M, N]
b_shape = [L, M, 1]
c_shape = [K, L, 1, 1]
traced = torch.jit.trace(
test,
(
torch.rand(*a_shape, device=device),
torch.rand(*b_shape, device=device),
torch.rand(*c_shape, device=device),
),
)
a = torch.rand(*a_shape, device=device)
b = torch.rand(*b_shape, device=device)
c = torch.rand(*c_shape, device=device)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = a.cpu().numpy() + b.cpu().numpy() + c.cpu().numpy()
np.testing.assert_allclose(npr, x.cpu().numpy())
test_configs = [[5, 2, 7, 3], [8, 8, 8, 8]]
for test_config in test_configs:
test_body(*test_config)
def test_all_combos(self):
def easy(x, y, z):
a = torch.add(x, y)
b = torch.add(a, z)
c = torch.add(x, b)
d = torch.add(c, a)
return d
def np_easy(x, y, z):
a = x + y
b = a + z
c = x + b
d = c + a
return d
traced = torch.jit.trace(
easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))
)
a = torch.rand(1024)
b = torch.rand(1024)
c = torch.rand(1024)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = np_easy(a.numpy(), b.numpy(), c.numpy())
np.testing.assert_allclose(npr, x.numpy())
def test_rank_two(self):
def easy(x, y, z):
a = torch.add(x, y)
b = torch.add(a, z)
c = torch.add(x, b)
d = torch.add(c, a)
return d
def np_easy(x, y, z):
a = x + y
b = a + z
c = x + b
d = c + a
return d
shape = 32, 32
traced = torch.jit.trace(
easy, (torch.rand(shape), torch.rand(shape), torch.rand(shape))
)
a = torch.rand(shape)
b = torch.rand(shape)
c = torch.rand(shape)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = np_easy(a.numpy(), b.numpy(), c.numpy())
np.testing.assert_allclose(npr, x.numpy())
def test_broadcast(self):
def easy(x, y, z):
a = torch.add(x, y)
b = torch.add(a, z)
return b
def np_easy(x, y, z):
a = x + y
b = a + z
return b
N = 32
traced = torch.jit.trace(easy, (torch.rand(N, N), torch.rand(N), torch.rand(N, N)))
a = torch.rand(N, N)
b = torch.rand(N)
c = torch.rand(N, N)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
npr = np_easy(a.numpy(), b.numpy(), c.numpy())
np.testing.assert_allclose(npr, x.numpy())
def test_broadcast_2(self):
zero = torch.tensor([0.0], dtype=torch.float)
def foo(x, y, z):
aaa = torch.add(x, y)
bbb = torch.add(zero, aaa)
return torch.add(bbb, z)
def foo_np(x, y, z):
a = x + y
b = zero.numpy() + a
return b + z
x = torch.rand(3, 4)
y = torch.ones(3, 1)
z = torch.rand(4)
traced = torch.jit.trace(foo, (x, y, z))
r = warmup_and_run_forward(traced, x, y, z)
self.assertLastGraphAllFused()
rnp = foo_np(x.numpy(), y.numpy(), z.numpy())
np.testing.assert_allclose(r, rnp)
def test_broadcast_big2(self):
zero = torch.tensor([0.0], dtype=torch.float)
def foo(x, y, z):
aaa = torch.add(x, y)
bbb = torch.add(zero, aaa)
return torch.add(bbb, z)
def foo_np(x, y, z):
a = x + y
b = zero.numpy() + a
return b + z
x = torch.rand(32, 1024)
y = torch.ones(32, 1)
z = torch.rand(1024)
traced = torch.jit.trace(foo, (x, y, z))
r = warmup_and_run_forward(traced, x, y, z)
self.assertLastGraphAllFused()
rnp = foo_np(x.numpy(), y.numpy(), z.numpy())
np.testing.assert_allclose(r, rnp)
    def test_alpha(self):
        # torch.add with alpha scales the second operand: result is
        # x + alpha * x, checked against the numpy equivalent below.
        def alpha(x):
            aaa = torch.add(x, x, alpha=2.0)
            return aaa

        traced = torch.jit.trace(alpha, (torch.tensor([1.0])))

        a = torch.tensor([1.0])
        x = traced(a)
        np.testing.assert_allclose(a.numpy() + 2.0 * a.numpy(), x.numpy())
    @suppress_warnings
    def test_constant(self):
        # A tensor constant created inside the traced function gets baked
        # into the graph; the add against it should still fuse.
        def constant(x):
            bbb = torch.tensor([1.0])
            aaa = torch.add(x, bbb)
            return aaa

        traced = torch.jit.trace(constant, (torch.tensor([1.0])))

        a = torch.tensor([1.0])
        x = warmup_and_run_forward(traced, a)
        self.assertLastGraphAllFused()
        np.testing.assert_allclose(a.numpy() + 1.0, x.numpy())
def test_add_sub(self):
def easy(x, y, z):
aaa = torch.add(x, y)
bbb = torch.sub(aaa, z)
return bbb
traced = torch.jit.trace(
easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))
)
a = torch.rand(1024)
b = torch.rand(1024)
c = torch.rand(1024)
x = warmup_and_run_forward(traced, a, b, c)
self.assertLastGraphAllFused()
np.testing.assert_allclose(a.numpy() + b.numpy() - c.numpy(), x.numpy())
def test_promotion(self):
def easy(x, y):
aaa = torch.add(x, y)
return aaa
traced = torch.jit.trace(
easy,
(torch.zeros(1024, dtype=torch.int32), torch.rand(1024, dtype=torch.float32)),
)
a = torch.zeros(1024, dtype=torch.int32)
b = torch.rand(1024, dtype=torch.float32)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose(a.numpy() + b.numpy(), x.numpy())
def test_double(self):
TENSOR_LEN = 8
def easy(x, y):
aaa = torch.add(x, y)
bbb = torch.mul(aaa, y)
return bbb
traced = torch.jit.trace(
easy,
(torch.rand(TENSOR_LEN, dtype=torch.float64), torch.full((TENSOR_LEN,), 0.5, dtype=torch.float64)),
)
a = torch.rand(TENSOR_LEN, dtype=torch.double)
b = torch.full((TENSOR_LEN,), 0.5, dtype=torch.double)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())
def test_short(self):
TENSOR_LEN = 8
def easy(x, y):
aaa = torch.add(x, y)
bbb = torch.mul(aaa, y)
return bbb
traced = torch.jit.trace(
easy,
(torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16),
torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)),
)
a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)
b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())
def test_char(self):
TENSOR_LEN = 8
def easy(x, y):
aaa = torch.add(x, y)
bbb = torch.mul(aaa, y)
return bbb
traced = torch.jit.trace(
easy,
(torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8),
torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)),
)
a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)
b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())
def test_int64_promotion(self):
TENSOR_LEN = 8
def easy(x, y):
aaa = torch.add(x, y)
bbb = torch.mul(aaa, y)
return bbb
traced = torch.jit.trace(
easy,
(torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8),
torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int64)),
)
a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)
b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int64)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())
    def test_eq(self):
        # Elementwise equality; zeros == zeros yields all ones.
        def easy(x, y):
            c = torch.eq(x, y)
            return c

        traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
        # NOTE(review): runs with int32 inputs although tracing used float
        # zeros — presumably deliberate to exercise re-profiling; confirm.
        a = torch.zeros(1024, dtype=torch.int32)
        b = torch.zeros(1024, dtype=torch.int32)
        x = warmup_and_run_forward(traced, a, b)
        self.assertLastGraphAllFused()
        np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_ne(self):
def easy(x, y):
c = torch.ne(x, y)
return c
traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
a = torch.zeros(1024, dtype=torch.int32)
b = torch.ones(1024, dtype=torch.int32)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_ge(self):
def easy(x, y):
c = torch.ge(x, y)
return c
traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
aa = np.empty([1024], dtype=np.int32)
aa.fill(5)
a = torch.from_numpy(aa)
b = torch.zeros(1024, dtype=torch.int32)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_gt(self):
def easy(x, y):
c = torch.gt(x, y)
return c
traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
a = torch.ones(1024, dtype=torch.int32)
b = torch.zeros(1024, dtype=torch.int32)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_le(self):
def easy(x, y):
c = torch.le(x, y)
return c
traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))
aa = np.empty([1024], dtype=np.int32)
aa.fill(5)
a = torch.from_numpy(aa)
b = torch.zeros(1024, dtype=torch.int32)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose(np.zeros(1024), x.numpy())
def test_lt(self):
def easy(x, y):
c = torch.lt(x, y)
return c
for dev in self.devices:
traced = torch.jit.trace(easy, (torch.zeros(1024, device=dev), torch.zeros(1024, device=dev)))
a = torch.ones(1024, dtype=torch.int32, device=dev)
b = torch.zeros(1024, dtype=torch.int32, device=dev)
x = warmup_and_run_forward(traced, a, b)
self.assertLastGraphAllFused()
np.testing.assert_allclose(np.zeros(1024), x.cpu().numpy())
@suppress_warnings
def test_min_max(self):
def test(x, y):
return torch.max(torch.min(x, y), torch.tensor([4.0]))
traced = torch.jit.trace(test, (torch.zeros(1024), torch.zeros(1024)))
a = 8.0 * torch.rand(1024)
b = 8.0 * torch.rand(1024)
np.testing.assert_allclose(
warmup_and_run_forward(traced, a, b), np.maximum(np.minimum(a.numpy(), b.numpy()), [4.0])
)
self.assertLastGraphAllFused()
    def test_min_max_reduction(self):
        # Full reductions (min and max over all elements) combined with an
        # add, checked against numpy's amin/amax.
        def test(x):
            return torch.min(x) + torch.max(x)

        traced = torch.jit.trace(test, (torch.zeros(1024)))
        a = 8.0 * torch.rand(1024)
        np.testing.assert_allclose(warmup_and_run_forward(traced, a),
                                   np.amin(a.numpy()) + np.amax(a.numpy()))
        self.assertLastGraphAllFused()
def test_min_max_reduction2(self):
def test(x):
return x.min() + x.max()
traced = torch.jit.trace(test, (torch.zeros(1024)))
a = 8.0 * torch.rand(1024)
np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(a.numpy()) + np.amax(a.numpy()))
self.assertLastGraphAllFused()
def test_min_max_reduction_dim1(self):
def test(x):
return torch.min(x, 1)[0] + torch.max(x, 1)[0]
traced = torch.jit.trace(test, (torch.zeros(16, 16)))
a = 8.0 * torch.rand(16, 16)
np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(
a.numpy(), axis=1) + np.amax(a.numpy(), axis=1))
self.assertLastGraphAllFused()
def test_min_max_reduction_dim1_2(self):
def test(x):
return torch.min(x * x, 1)
traced = torch.jit.trace(test, (torch.zeros(16, 16)))
a = 8.0 * torch.rand(16, 16)
np.testing.assert_allclose(warmup_and_run_forward(traced, a)[0], np.amin((a * a).numpy(), axis=1))
self.assertLastGraphAllFused()
def test_clamp(self):
def test(x):
return torch.clamp(x + 3.0, 0.0, 6.0)
for dev in self.devices:
traced = torch.jit.trace(test, (torch.zeros(1024, device=dev)))
a = 20.0 * torch.rand(1024, device=dev) - 10.0
an = a.cpu().numpy()
np.testing.assert_allclose(warmup_and_run_forward(traced, a).cpu(), np.clip(an + 3.0, 0.0, 6.0))
self.assertLastGraphAllFused()
def test_relu(self):
def test(x):
return torch.clamp(F.relu(x), 0, 0.5)
for dev in self.devices:
traced = torch.jit.trace(test, (torch.zeros(1024, device=dev)))
a = 20.0 * torch.rand(1024, device=dev) - 10.0
an = a.cpu().numpy()
np.testing.assert_allclose(warmup_and_run_forward(traced, a).cpu(), np.clip((np.maximum(0, an)), 0, 0.5))
self.assertLastGraphAllFused()
def test_reps(self):
def easy(x, y):
c = torch.add(x, y)
return c
traced = torch.jit.trace(easy, (torch.rand(1024), torch.rand(1024)))
for _ in range(32):
a = torch.ones(1024)
b = torch.zeros(1024)
x = warmup_and_run_forward(traced, a, b)
np.testing.assert_allclose(np.ones(1024), x.numpy())
def test_add_const_rhs(self):
def test(x):
return x + 3.0
traced = torch.jit.trace(test, torch.rand(4))
x = torch.rand(4)
y = warmup_and_run_forward(traced, x)
self.assertLastGraphAllFused()
np.testing.assert_allclose(x.numpy() + 3.0, y.numpy())
    def test_int_output(self):
        # Elementwise product of three int32 tensors; verifies integer
        # outputs survive fusion and match the numpy result exactly.
        def test(x, y, z):
            return x * y * z

        # Values in [1, 4) so the int32 products stay small and exact.
        xs = [(torch.rand(4) * 3 + 1).to(torch.int32) for i in range(3)]
        x, y, z = xs
        xn, yn, zn = (t.numpy() for t in xs)
        traced = torch.jit.trace(test, (x, y, z))
        res = warmup_and_run_forward(traced, x, y, z)
        self.assertLastGraphAllFused()
        np.testing.assert_allclose(xn * yn * zn, res.numpy())
def test_binary_ops(self):
def test_atan2(x, y):
c = torch.atan2(torch.add(x, y), y)
return c
def test_gt(x, y):
c = torch.gt(torch.add(x, y), y)
return c
def test_ge(x, y):
c = torch.ge(torch.add(x, y), y)
return c
def test_lt(x, y):
c = torch.lt(torch.add(x, y), y)
return c
def test_le(x, y):
c = torch.le(torch.add(x, y), y)
return c
def test_lerp(x, y):
c = torch.lerp(torch.add(x, 1), x, 2.0)
return c
def test_mul(x, y):
c = torch.mul(torch.add(x, y), y)
return c
def test_ne(x, y):
c = torch.ne(torch.add(x, y), y)
return c
def test_div(x, y):
c = torch.div(torch.add(x, y), 2)
return c
def test_eq(x, y):
c = torch.eq(torch.add(x, y), y)
return c
def test_fmod(x, y):
c = torch.fmod(torch.add(x, y), 2)
return c
def test_sub(x, y):
c = torch.sub(torch.add(x, y), x)
return c
def test_remainder(x, y):
c = torch.remainder(torch.add(x, y), 3.0)
return c
def test_pow(x, y):
c = torch.pow(torch.add(x, y), 2.0)
return c
def test_type_as(x, y):
return x.type_as(torch.add(x, y))
cmp_fns = {
test_gt,
test_ge,
test_lt,
test_le,
test_ne,
test_eq
}
non_cmp_fns = {
test_atan2,
test_lerp,
test_mul,
test_div,
test_fmod,
test_sub,
test_remainder,
test_pow,
test_type_as,
}
all_test_fns = cmp_fns.union(non_cmp_fns)
fn_dev_dtype = itertools.product(all_test_fns, self.devices, self.dtypes)
for torch_fn, dev, data_type in fn_dev_dtype:
if torch_fn is test_lerp and data_type is torch.bfloat16:
continue
rand_a = torch.rand(1024, dtype=data_type, device=dev)
rand_b = torch.rand(1024, dtype=data_type, device=dev)
in1 = 20 * torch.rand(1024, dtype=data_type, device=dev)
in2 = 20 * torch.rand(1024, dtype=data_type, device=dev)
traced = torch.jit.trace(torch_fn, (in1, in2))
x = warmup_and_run_forward(traced, rand_a, rand_b)
self.assertLastGraphAllFused()
_atol = 2e-3
_rtol = 1e-5
if data_type is torch.bfloat16:
# Compared to aten logic, NNC could save additional BF16/Fp32 conversion.
# Take d = a + b - c as an example, the aten logic is as follows at
# operator level:
# tmp = to_bf16(to_fp32(a) + to_fp32(b))
# d = to_bf16(to_fp32(tmp) + to_fp32(c))
# But NNC could fuse the compression and remove the redundant conversions.
# The final statement is as follows
# d = to_bf16(to_fp32(a) + to_fp32(b) + to_fp32(c))
# Hence, we simulate NNC computation by feeding fp32 tensors and converting
# the result tensor back to bf16. The simulation could avoid the numeric
# deviation to simplify the result comparison
y = warmup_and_run_forward(traced, rand_a.float(), rand_b.float())
if torch_fn not in cmp_fns:
y = y.bfloat16()
_atol = 2e-2
else:
y = torch_fn(rand_a, rand_b)
self.assertEqual(x.cpu(), y.cpu(), atol=_atol, rtol=_rtol)
def test_unary_ops(self):
def test_cast_float(x, y):
c = torch.ops.aten._cast_Float(torch.add(x, y))
return c
def test_round(x, y):
c = torch.round(torch.add(x, y))
return c
def test_sin(x, y):
c = torch.sin(torch.add(x, y))
return c
def test_asin(x, y):
c = torch.asin(torch.add(x, y))
return c
def test_sinh(x, y):
c = torch.sinh(torch.add(x, y))
return c
def test_cos(x, y):
c = torch.cos(torch.add(x, y))
return c
def test_acos(x, y):
c = torch.acos(torch.add(x, y))
return c
def test_cosh(x, y):
c = torch.cosh(torch.add(x, y))
return c
def test_tan(x, y):
c = torch.tan(torch.add(x, y))
return c
def test_atan(x, y):
c = torch.atan(torch.add(x, y))
return c
def test_tanh(x, y):
c = torch.tanh(torch.add(x, y))
return c
def test_sqrt(x, y):
c = torch.sqrt(torch.add(x, y))
return c
def test_rsqrt(x, y):
c = torch.rsqrt(torch.add(x, y))
return c
def test_floor(x, y):
c = torch.floor(torch.add(x, y))
return c
def test_ceil(x, y):
c = torch.ceil(torch.add(x, y))
return c
def test_trunc(x, y):
c = torch.trunc(torch.add(x, y))
return c
def test_abs(x, y):
c = torch.abs(torch.add(x, y))
return c
def test_log(x, y):
c = torch.log(torch.add(x, y))
return c
def test_log2(x, y):
c = torch.log2(torch.add(x, y))
return c
def test_log10(x, y):
c = torch.log10(torch.add(x, y))
return c
def test_log1p(x, y):
c = torch.log1p(torch.add(x, y))
return c
def test_rqrt(x, y):
c = torch.rsqrt(torch.add(x, y))
return c
def test_erf(x, y):
c = torch.erf(torch.add(x, y))
return c
def test_exp(x, y):
c = torch.exp(torch.add(x, y))
return c
def test_expm1(x, y):
c = torch.expm1(torch.add(x, y))
return c
def test_erfc(x, y):
c = torch.erfc(torch.add(x, y))
return c
def test_frac(x, y):
c = torch.frac(torch.add(x, y))
return c
def test_lgamma(x, y):
c = torch.lgamma(torch.add(x, y))
return c
def test_sigmoid(x, y):
c = torch.sigmoid(torch.add(x, y))
return c
def test_reciprocal(x, y):
c = torch.reciprocal(torch.add(x, y))
return c
def test_neg(x, y):
c = torch.neg(torch.add(x, y))
return c
def test_relu(x, y):
c = torch.relu(torch.add(x, y))
return c
def test_hardtanh(x, y):
c = F.hardtanh(torch.add(x, y), -1.0, 1.0)
return c
def test_threshold(x, y):
c = F.threshold(torch.add(x, y), 0.5, 10)
return c
gpu_only_fns = {
test_erf,
test_erfc
}
fns = {
test_round,
test_sin,
test_asin,
test_sinh,
test_cos,
test_acos,
test_cosh,
test_tan,
test_atan,
test_sqrt,
test_floor,
test_ceil,
test_trunc,
test_abs,
test_log,
test_log2,
test_log10,
test_log1p,
test_rsqrt,
test_exp,
test_expm1,
test_frac,
test_lgamma,
test_reciprocal,
test_neg,
test_threshold,
test_relu,
test_tanh,
test_hardtanh,
test_sigmoid,
}
fn_dev_dtype = itertools.product(gpu_only_fns.union(fns), self.devices, self.dtypes)
torch.manual_seed(0)
for torch_fn, dev, data_type in fn_dev_dtype:
if torch_fn is test_lgamma and dev == "cuda":
# lgamma_cuda does not support BF16
continue
rand_a = torch.rand(1024, dtype=data_type, device=dev)
rand_b = torch.rand(1024, dtype=data_type, device=dev)
ins = 20 * torch.rand(1024, dtype=data_type, device=dev)
cc = np.empty([1024], dtype=np.float32)
cc.fill(np.nan)
nans = torch.from_numpy(cc).to(dev)
traced = torch.jit.trace(torch_fn, (ins, ins))
x = warmup_and_run_forward(traced, rand_a, rand_b)
self.assertLastGraphAllFused()
_atol = 5e-3 if data_type is torch.bfloat16 else 2e-3
_rtol = 1e-5
if data_type is torch.bfloat16 and torch_fn not in gpu_only_fns:
y = warmup_and_run_forward(traced, rand_a.float(), rand_b.float())
y = y.bfloat16()
else:
y = torch_fn(rand_a, rand_b)
self.assertEqual(x.cpu(), y.cpu(), atol=_atol, rtol=_rtol)
# nans
# TODO: reenable. Currently all of the tests fail
# traced = torch.jit.trace(torch_fn, (ins, ins))
# x = warmup_and_run_forward(traced, rand_a, rand_b)
# y = torch_fn(nans, rand_b)
# try:
# np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy())
# print("Succeeded on dev=", dev, "function=", torch_fn)
# except AssertionError:
# # Print extra info before exiting:
# print("Failed on dev=", dev, "function=", torch_fn)
# # np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy())
def test_round_2(self):
    """torch.round (banker's rounding: 2.5 -> 2, 3.5 -> 4) matches eager after tracing."""
    def round_fn(x):
        # local helper renamed so it no longer shadows the builtin `round`
        return torch.round(x)

    for data_type in [torch.float32, torch.double]:
        a = torch.tensor([0.2, 1.6, 2.5, 3.5]).to(data_type)
        traced = torch.jit.trace(round_fn, (a,))
        x = warmup_and_run_forward(traced, a)
        self.assertLastGraphAllFused()
        # BUG FIX: the old check was `y = round(x)` - rounding the already
        # rounded traced output, which passes vacuously since round is
        # idempotent. Compare against eager rounding of the *input*.
        y = round_fn(a)
        self.assertEqual(x, y)
def test_rand_like(self):
    """Fused rand_like output must be uniform on [0, 1) (checked via moments)."""
    N = 1 << 16

    def run_rand_like(x, y):
        return torch.rand_like(torch.add(x, y))

    for device in self.devices:
        x = torch.rand(N, device=device)
        traced = torch.jit.trace(run_rand_like, (x, x), check_trace=False)
        for data_type in self.dtypes:
            _x = x.to(dtype=data_type)
            x_v = warmup_and_run_forward(traced, _x, _x)
            self.assertLastGraphAllFused()
            # BUG FIX: the moments were previously computed on the *input*
            # x (trivially uniform), so the fused rand_like output was
            # never statistically validated. Check the output per dtype.
            x_np = x_v.float().cpu().numpy()
            # E[U] = 1/2, E[U^2] = 1/3, E[U^3] = 1/4 for U ~ Uniform[0, 1)
            np.testing.assert_allclose(np.mean(x_np), 1. / 2, rtol=2e-2)
            np.testing.assert_allclose(np.mean(x_np ** 2), 1. / 3, rtol=2e-2)
            np.testing.assert_allclose(np.mean(x_np ** 3), 1. / 4, rtol=2e-2)
def test_nans(self):
    """Fused min/max must propagate NaN regardless of operand order."""
    def test_max(x, y):
        return torch.max(2 * x, 2 * y)

    def test_min(x, y):
        return torch.min(2 * x, 2 * y)

    tmax = torch.jit.trace(test_max, (torch.rand(1), torch.rand(1)))
    tmin = torch.jit.trace(test_min, (torch.rand(1), torch.rand(1)))

    for data_type in self.dtypes:
        nan_t = torch.tensor([np.nan]).to(dtype=data_type)
        one_t = torch.tensor([1.0]).to(dtype=data_type)

        def expect_nan(traced, lhs, rhs, label):
            # .float() so bfloat16 results can round-trip through numpy
            if not np.isnan(warmup_and_run_forward(traced, lhs, rhs).float().item()):
                raise AssertionError("expected nan for %s" % label)

        expect_nan(tmin, nan_t, one_t, "tmin(x, y)")
        expect_nan(tmin, one_t, nan_t, "tmin(y, x)")
        self.assertLastGraphAllFused()
        expect_nan(tmax, nan_t, one_t, "tmax(x, y)")
        expect_nan(tmax, one_t, nan_t, "tmax(y, x)")
        self.assertLastGraphAllFused()
def test_double_intrinsics(self):
    """torch.pow on double inputs should still fully fuse."""
    def do_pow(inp):
        return torch.pow(inp, 7)

    for device in self.devices:
        sample = torch.rand(10, dtype=torch.double, device=device)
        traced = torch.jit.trace(do_pow, (sample,))
        warmup_and_run_forward(traced, sample)
        self.assertLastGraphAllFused()
def test_remainder(self):
    """torch.remainder fuses and matches eager: random, div-by-zero, and NaN inputs."""
    def run_remainder(x, y):
        c = torch.remainder(torch.add(x, y), x)
        return c

    for data_type in self.dtypes:
        a = torch.rand(1024, dtype=data_type)
        b = torch.rand(1024, dtype=data_type)
        zeros = torch.zeros(1024, dtype=data_type)
        # BUG FIX: `np.array(1024, dtype=float)` created a 0-d scalar whose
        # value was 1024 (then filled with NaN), not the intended
        # 1024-element NaN vector. Build the vector explicitly.
        nans = torch.from_numpy(np.full(1024, np.nan)).to(dtype=data_type)

        zeros1 = torch.zeros(1024, dtype=data_type)
        zeros2 = torch.zeros(1024, dtype=data_type)

        # random floats (re-trace each case to get a fresh fusion group)
        traced = torch.jit.trace(run_remainder, (zeros1, zeros2))
        x = warmup_and_run_forward(traced, a, b)
        self.assertLastGraphAllFused()
        y = run_remainder(a, b)
        if data_type is torch.bfloat16:
            self.assertEqual(x, y, atol=4e-3, rtol=2e-3)
        else:
            self.assertEqual(x, y)

        # div by 0
        traced = torch.jit.trace(run_remainder, (zeros1, zeros2))
        x = warmup_and_run_forward(traced, zeros, a)
        self.assertLastGraphAllFused()
        y = run_remainder(zeros, a)
        self.assertEqual(x, y)

        # numerators and denominators are nan
        traced = torch.jit.trace(run_remainder, (zeros1, zeros2))
        x = warmup_and_run_forward(traced, nans, a)
        self.assertLastGraphAllFused()
        y = run_remainder(nans, a)
        self.assertEqual(x, y)
def test_multioutput(self):
    """A single fused kernel may return several tensors."""
    def easy(x):
        b = x + 1
        c = b + b
        return (b, c)

    traced = torch.jit.trace(easy, (torch.zeros(1024)))
    inp = torch.zeros(1024)
    out_b, out_c = warmup_and_run_forward(traced, inp)
    self.assertLastGraphAllFused()
    expect_b = inp.numpy() + 1
    expect_c = expect_b + expect_b
    np.testing.assert_allclose(out_b.numpy(), expect_b)
    np.testing.assert_allclose(out_c.numpy(), expect_c)
def test_chunk(self):
    """torch.chunk inside a fusion group matches a numpy array_split reference."""
    def easy(x):
        y = x + 1
        aaa, bbb = torch.chunk(y, 2)
        return aaa + bbb

    for data_type in self.dtypes:
        trace_input = torch.zeros(1024, 1024, dtype=data_type)
        traced = torch.jit.trace(easy, (trace_input))
        inp = torch.zeros(32, 32, dtype=data_type)
        actual = warmup_and_run_forward(traced, inp)
        self.assertLastGraphAllFused()
        # compare in float so bfloat16 inputs round-trip through numpy
        base = inp.float().numpy() + 1
        top_half, bottom_half = np.array_split(base, 2)
        np.testing.assert_allclose(top_half + bottom_half, actual.float().numpy())
def test_cat(self):
    """Fused torch.cat matches eager for varying dtypes, dims, and channels-last inputs."""
    for device in self.devices:
        _dim = 1

        def foo(*args):
            args_2 = [v + i for i, v in enumerate(args)]
            # foo closes over `_dim`; re-tracing after `_dim` is reassigned
            # below bakes the new dim into the trace.
            v = torch.cat(args_2, dim=_dim)
            return v * v

        for data_type in self.dtypes:
            M = 16
            Ns = [128, 16, 1]
            values = [torch.zeros(M, N, dtype=data_type, device=device) for N in Ns]
            traced = torch.jit.trace(foo, values)
            x = warmup_and_run_forward(traced, *values)
            self.assertLastGraphAllFused()
            ref = foo(*values)
            # compare as float so reduced-precision dtypes can go via numpy
            np.testing.assert_allclose(ref.cpu().float().numpy(), x.cpu().float().numpy())

        # Test channels-last: try every cat dimension of a 4-D NHWC tensor
        for _cur_dim in range(4):
            _dim = _cur_dim
            values = [torch.randn((2, 3, 4, 5), device=device).to(memory_format=torch.channels_last) for _ in range(10)]
            traced = torch.jit.trace(foo, values)
            x = warmup_and_run_forward(traced, *values)
            self.assertLastGraphAllFused()
            ref = foo(*values)
            self.assertEqual(ref, x)
# This test checks that we correctly handle fusion group with just aten::cat in it.
# Note that the test only makes sense with min_fusion_group=1, otherwise no
# fusion groups would be formed at all.
# TODO: Fix and re-enable the test.
@unittest.skip("cat is broken with fusion group inlining disabled")
def test_cat_only(self):
    """A fusion group containing nothing but aten::cat must still match eager."""
    for device in self.devices:
        def foo(*args):
            args_2 = [v + i for i, v in enumerate(args)]
            v = torch.cat(args_2, dim=1)
            return v

        M = 16
        Ns = [128, 16, 1]
        values = [torch.zeros(M, N, device=device) for N in Ns]
        traced = torch.jit.trace(foo, values)
        x = warmup_and_run_forward(traced, *values)
        self.assertLastGraphAllFused()
        ref = foo(*values)
        np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())
def test_cat_negative_dim(self):
    """torch.cat along dim=-1 fuses and matches eager."""
    for device in self.devices:
        def foo(*tensors):
            joined = torch.cat(tensors, dim=-1)
            return joined * joined

        rows = 16
        widths = [128, 16, 1]
        inputs = [torch.randn(rows, w, device=device) for w in widths]
        traced = torch.jit.trace(foo, inputs)
        fused = warmup_and_run_forward(traced, *inputs)
        self.assertLastGraphAllFused()
        expected = foo(*inputs)
        np.testing.assert_allclose(expected.cpu().numpy(), fused.cpu().numpy())
def test_cat_promote_inputs(self):
    """cat over mixed half/float/double inputs promotes to a common dtype."""
    for device in self.devices:
        def foo(*tensors):
            joined = torch.cat(tensors, dim=1)
            return joined * joined

        rows = 16
        specs = zip([128, 16, 1], [torch.half, torch.float32, torch.double])
        inputs = [torch.randn(rows, cols, device=device, dtype=dt) for cols, dt in specs]
        traced = torch.jit.trace(foo, inputs)
        fused = warmup_and_run_forward(traced, *inputs)
        self.assertLastGraphAllFused()
        expected = foo(*inputs)
        np.testing.assert_allclose(expected.cpu().numpy(), fused.cpu().numpy())
def test_cat_empty_tensors(self):
    """Empty tensors mixed into (or making up all of) a cat must not break fusion."""
    for device in self.devices:
        def foo(*tensors):
            joined = torch.cat(tensors, dim=1)
            return joined * joined

        empty = torch.tensor([], device=device, dtype=torch.double)

        def check(inputs):
            traced = torch.jit.trace(foo, inputs)
            fused = warmup_and_run_forward(traced, *inputs)
            self.assertLastGraphAllFused()
            expected = foo(*inputs)
            np.testing.assert_allclose(expected.cpu().numpy(), fused.cpu().numpy())

        # one empty tensor alongside real inputs
        check([empty] + [torch.randn(16, n, device=device) for n in [128, 16, 1]])
        # now test with only empty tensors
        check([empty for _ in range(3)])
def test_cat_with_constant_dim(self):
    """Nested cats with a constant dim (including an empty input) fuse correctly."""
    for device in self.devices:
        def foo(*tensors):
            first = torch.cat(tensors, dim=1)
            second = torch.cat([first], dim=1)
            return second * second

        empty = torch.tensor([], device=device, dtype=torch.float32)
        inputs = [empty, torch.randn(1, 64, device=device), torch.randn(1, 64, device=device)]
        traced = torch.jit.trace(foo, inputs)
        fused = warmup_and_run_forward(traced, *inputs)
        self.assertLastGraphAllFused()
        expected = foo(*inputs)
        np.testing.assert_allclose(expected.cpu().numpy(), fused.cpu().numpy())
def test_scalar(self):
    """Scripted add with scalar alpha (float and int) matches the algebraic form."""
    @torch.jit.script
    def test_float(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: float, b: float) -> torch.Tensor:
        return torch.add(torch.add(x, y, alpha=a), z, alpha=b)

    @torch.jit.script
    def test_int(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: int, b: int) -> torch.Tensor:
        return torch.add(torch.add(x, y, alpha=a), z, alpha=b)

    for fn in (test_float, test_int):
        for data_type in self.dtypes:
            x, y, z = (torch.rand(4, dtype=data_type) for _ in range(3))
            alpha, beta = 1, 2
            fn(x, y, z, alpha, beta)              # profiling run
            result = fn(x, y, z, alpha, beta)     # optimized run
            # torch.add(t, u, alpha=k) computes t + k * u
            self.assertEqual(result, x + y * alpha + z * beta)
def test_loop(self):
    """A scripted for-loop over tensor ops runs without error."""
    @torch.jit.script
    def test(x: torch.Tensor, y: torch.Tensor, z: int) -> torch.Tensor:
        b = y
        for _ in range(z):
            # NOTE(review): `a` is never used - presumably this exercises
            # dead-value handling inside a scripted loop; confirm intent.
            a = x + y
            b = b + y
        return b

    x, y, z = (torch.zeros(32, 32), torch.ones(32, 32), 4)
    test(x, y, z)       # first call: profiling run
    r = test(x, y, z)   # second call: optimized run (result unchecked)
def test_slice(self):
    """Strided slicing inside a traced function matches the eager computation."""
    def easy(x, y):
        a = x[0:512:2]
        b = y[0:512:2]
        return a + b

    traced = torch.jit.trace(easy, (torch.ones(1024, 1024), torch.zeros(1024, 1024)))
    inp = torch.ones(1024, 1024)
    actual = traced(inp, inp)
    expected = inp[0:512:2]
    expected = expected + expected
    np.testing.assert_allclose(expected.numpy(), actual.numpy())
def test_unsqueeze(self, N=256):
    """torch.unsqueeze in a traced graph matches numpy expand_dims."""
    def easy(x, y):
        a = torch.unsqueeze(x, 0)
        b = torch.unsqueeze(y, 0)
        return a + b

    traced = torch.jit.trace(easy, (torch.ones(N, N), torch.zeros(N, N)))
    inp = torch.rand(N, N)
    actual = traced(inp, inp)
    expected = np.expand_dims(inp, 0)
    expected = expected + expected
    np.testing.assert_allclose(expected, actual.numpy())
def _test_softmax(self, device):
    """Shared checks: (log_)softmax over positive and negative dims matches eager.

    Runs with texpr reductions enabled, restoring the previous setting after
    each variant.
    """
    def test_softmax(x, y):
        a = F.softmax(x, dim=0, dtype=torch.float32)
        b = F.softmax(y, dim=0, dtype=torch.float32)
        c = F.softmax(x, dim=1, dtype=torch.float32)
        d = F.softmax(y, dim=1, dtype=torch.float32)
        return a + b + c + d

    def test_softmax_neg_index(x, y):
        a = F.softmax(x, dim=-2, dtype=torch.float32)
        b = F.softmax(y, dim=-2, dtype=torch.float32)
        c = F.softmax(x, dim=-1, dtype=torch.float32)
        d = F.softmax(y, dim=-1, dtype=torch.float32)
        return a + b + c + d

    def test_log_softmax(x, y):
        a = F.log_softmax(x, dim=0, dtype=torch.float32)
        b = F.log_softmax(y, dim=0, dtype=torch.float32)
        c = F.log_softmax(x, dim=1, dtype=torch.float32)
        d = F.log_softmax(y, dim=1, dtype=torch.float32)
        return a + b + c + d

    for test in (test_softmax, test_log_softmax, test_softmax_neg_index):
        for data_type in self.dtypes:
            # enable reduction fusion for this run; restored below
            old = torch._C._jit_set_texpr_reductions_enabled(True)
            traced_input = torch.randn(2, 3, dtype=data_type, device=device)
            traced = torch.jit.trace(test, (traced_input, traced_input))
            inp = torch.randn(2, 3, dtype=data_type, device=device)
            res = traced(inp, inp)
            # Use eager mode as reference.
            ref = test(inp, inp)
            np.testing.assert_allclose(ref, res.cpu().numpy(), rtol=1e-06, atol=1e-06)
            torch._C._jit_set_texpr_reductions_enabled(old)
def test_softmax_cpu(self):
    # Run the shared softmax checks on CPU.
    self._test_softmax('cpu')
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
@unittest.skip("global allocs are not supported yet.")
def test_softmax_cuda(self):
    # Run the shared softmax checks on CUDA (skipped: the softmax lowering
    # needs global allocations, per the skip reason above).
    self._test_softmax('cuda')
def test_half_gelu(self):
    """The erf-based bias-GELU fuses for half-precision CUDA inputs."""
    devices = ["cuda"] if torch.cuda.is_available() else []

    @torch.jit.script
    def bias_gelu(bias, y):
        x = bias + y
        return x * 0.5 * (1.0 + torch.erf(x / 1.41421))

    for device in devices:
        lhs = torch.rand(1024, dtype=torch.half, device=device)
        rhs = torch.rand(1024, dtype=torch.half, device=device)
        traced = torch.jit.trace(bias_gelu, (lhs, rhs))
        warmup_and_run_forward(traced, lhs, rhs)
        self.assertLastGraphAllFused()
def test_half_bn_relu(self):
    """batch_norm followed by relu fuses for half-precision CUDA inputs."""
    devices = ["cuda"] if torch.cuda.is_available() else []

    def foo(a, b, c):
        y = torch.nn.functional.batch_norm(a, b, c)
        z = y.relu()
        return z

    for device in devices:
        a = torch.rand(16, 16, dtype=torch.half, device=device)
        b = torch.rand(16, dtype=torch.half, device=device)
        c = torch.rand(16, dtype=torch.half, device=device)
        traced = torch.jit.trace(foo, (a, b, c))
        # BUG FIX: removed a stray debug `print(traced.graph)` that spammed
        # the test output on every run.
        warmup_and_run_forward(traced, a, b, c)
        self.assertLastGraphAllFused()
def test_exp_pow(self):
    """Chained multiplies combined with torch.pow fuse for double inputs."""
    @torch.jit.script
    def do_exp(x, y, z):
        return ((x * y) * 2) * torch.pow(z, 2)

    for device in self.devices:
        ins = [torch.rand(10, dtype=torch.double, device=device) for _ in range(3)]
        traced = torch.jit.trace(do_exp, tuple(ins))
        warmup_and_run_forward(traced, *ins)
        self.assertLastGraphAllFused()
def test_sin_pow(self):
    """sin(pow(x, 0)) - a constant-foldable expression - still fuses and matches eager."""
    def test(x):
        return torch.sin(torch.pow(x, 0))

    for data_type, shape in itertools.product(self.dtypes, [[3], [5], [10]]):
        sample = torch.rand(shape, dtype=data_type)
        scripted = torch.jit.script(test)
        fused = warmup_and_run_forward(scripted, sample)
        self.assertLastGraphAllFused()
        self.assertEqual(fused, test(sample))
def test_transpose(self):
@torch.jit.script
def test(x, y, z):
return x.transpose(0, 1) + y + z
x = torch.rand(4, 5, 2, 3)
y = torch.rand(5, 4, 2, 3)
z = torch.rand(5, 4, 2, 3)
ref = test(x, y, z)
res = test(x, y, z)
np.testing.assert_allclose(ref.numpy(), res.numpy())
def test_sliced_stride(self):
@torch.jit.script
def test(x, y, z):
return x + y + z
x = torch.rand(16, 4, 2, 3)[::2]
y = torch.rand(8, 4, 2, 3)
z = torch.rand(8, 4, 2, 3)
ref = test(x, y, z)
res = test(x, y, z)
np.testing.assert_allclose(ref.numpy(), res.numpy())
@unittest.skip("dynamic shapes are not quite there yet")
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
def test_dynamic_shape(self):
    """Dynamic-shape fusion: recompile on new sizes, broadcast, and reject mismatches."""
    with num_profiled_runs(2):
        @torch.jit.script
        def test(x, y, z):
            return x * y * z

        x, y, z = (torch.rand(4, 8).cuda() for _ in range(3))
        ref = test(x, y, z)
        # run once with a different size so the dynamic path is taken
        _ = test(*[torch.rand(6, 8).cuda() for _ in range(3)])
        res = test(x, y, z)
        np.testing.assert_allclose(ref.cpu().numpy(), res.cpu().numpy())

        # A wild broadcast appears.
        x = torch.rand(4, 8).cuda()
        y = torch.rand(1, 8).cuda()
        z = torch.rand(4, 1).cuda()
        res = test(x, y, z)
        xn, yn, zn = (t.cpu().numpy() for t in (x, y, z))
        np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn)

        # Mismatched shapes shouldn't reach codegen.
        x = torch.rand(4, 8).cuda()
        y = torch.rand(4, 8).cuda()
        z = torch.rand(5, 8).cuda()
        # BUG FIX: the old try/except silently passed when no exception was
        # raised at all; assertRaisesRegex fails loudly in that case.
        with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(4\) must match"):
            test(x, y, z)

        # Changing a static dimension fails guards.
        # x, y, z = [torch.rand(4, 7).cuda() for _ in range(3)]
        # xn, yn, zn = [t.cpu().numpy() for t in (x, y, z)]
        # res = test(x, y, z)
        # print(test.graph_for(x, y, z))
        # np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn)
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
def test_guard_fails(self):
    """Calling with a new input size after fusion must fail the guard gracefully."""
    @torch.jit.script
    def test(x, y, z):
        return x * y * z

    def call(n):
        return test(*[torch.rand(n).cuda() for _ in range(3)])

    call(4)  # profiling run
    call(4)  # optimized run
    call(4)  # exercise the fused kernel
    call(7)  # different shape: shape guard must fall back, not crash
def test_bitwise_ops(self):
    """Integer bitwise ops (&, |, ^, <<, >>) fuse and match eager results."""
    def run_and(x, y):
        return x & (x & y)

    def run_or(x, y):
        return x & (x | y)

    def run_xor(x, y):
        return x ^ (x ^ y)

    def run_lshift(x, y):
        return x & (x << y)

    def run_rshift(x, y):
        return x & (x >> y)

    ops = (run_and, run_or, run_xor, run_lshift, run_rshift)
    for device in self.devices:
        for fn in ops:
            ones = torch.ones(128, dtype=torch.int32, device=device)
            zeros = torch.zeros(128, dtype=torch.int32, device=device)
            example = torch.ones(128, dtype=torch.int32, device=device)
            traced = torch.jit.trace(fn, (example, example))
            fused = warmup_and_run_forward(traced, ones, zeros)
            self.assertLastGraphAllFused()
            expected = fn(ones, zeros)
            np.testing.assert_allclose(fused.cpu().numpy(), expected.cpu().numpy())
def test_where(self):
    """torch.where(gt(x, y), x, y) - i.e. elementwise max - fuses and matches eager."""
    def run_where(x, y):
        return torch.where(torch.gt(x, y), x, y)

    for data_type in self.dtypes:
        lhs = torch.rand(1024, dtype=data_type)
        rhs = torch.rand(1024, dtype=data_type)
        example = torch.zeros(1024, dtype=data_type)
        traced = torch.jit.trace(run_where, (example, example))
        fused = warmup_and_run_forward(traced, lhs, rhs)
        self.assertLastGraphAllFused()
        expected = run_where(lhs, rhs)
        # compare in float so bfloat16 can round-trip through numpy
        np.testing.assert_allclose(fused.float().numpy(), expected.float().numpy())
def test_multi_rand(self):
    """(x + y) - (y - x) == 2x even when y is a fused rand_like value."""
    for device in self.devices:
        def test(x):
            y = torch.rand_like(x)
            return (x + y) - (y - x)

        _rtol = 1e-5
        for data_type in self.dtypes:
            # BUG FIX: _atol was widened to 2e-2 for bfloat16 but never
            # reset, so every dtype *after* bfloat16 silently inherited
            # the looser tolerance. Compute it per iteration instead.
            _atol = 2e-2 if data_type is torch.bfloat16 else 2e-3
            a = torch.rand(4, dtype=data_type, device=device)
            scripted = torch.jit.script(test)
            out = warmup_and_run_forward(scripted, a)
            self.assertLastGraphAllFused()
            if not torch.allclose(out, 2 * a, atol=_atol, rtol=_rtol):
                raise AssertionError("output does not match expected")
def test_mask(self):
    """Comparing an unsqueezed bool tensor against 0 fuses and matches eager."""
    def test(x):
        return x.unsqueeze(1) == 0

    for d in self.devices:
        for data_type in self.dtypes:
            mask = torch.rand(4, dtype=data_type, device=d) > 0.5
            scripted = torch.jit.script(test)
            fused = warmup_and_run_forward(scripted, mask)
            self.assertLastGraphAllFused()
            if not torch.equal(fused, test(mask)):
                raise AssertionError("output does not match expected")
def test_simple_add(self):
    """Block-code generation path produces a correct elementwise add."""
    val = torch._C._jit_get_te_generate_block_code()
    torch._C._jit_set_te_generate_block_code(True)
    fall_bk = torch._C._jit_texpr_fallback_allowed()
    torch._C._jit_texpr_set_fallback_allowed(True)
    # BUG FIX: restore the global JIT flags even if an assertion below
    # fails; previously a failure here leaked the modified settings into
    # every subsequent test.
    try:
        def simple(a, b):
            return torch.add(a, b)

        a = torch.ones(256, 256)
        b = torch.ones(256, 256)
        traced = torch.jit.trace(simple,
                                 (torch.ones(256, 256), torch.ones(256, 256)))
        f = traced(a, b)
        f_test = np.full((256, 256), 2, dtype=float)
        np.testing.assert_allclose(f.numpy(), f_test)
    finally:
        torch._C._jit_set_te_generate_block_code(val)
        torch._C._jit_texpr_set_fallback_allowed(fall_bk)
def test_strided_output_preserved(self):
    """Fused kernels must reproduce eager results *and* eager output strides."""
    def foo(a, b):
        return a + b - a

    def check(arg):
        scripted = torch.jit.script(foo)
        scripted(arg, arg)              # profiling run
        scripted(arg, arg)              # optimized run
        out_s = scripted(arg, arg)
        out_eager = foo(arg, arg)
        self.assertEqual(out_s, out_eager)
        self.assertEqual(out_s.stride(), out_eager.stride())
        self.assertLastGraphAllFused()

    # smaller, easier to debug example: a 2x3 view with swapped strides
    x = torch.as_strided(torch.arange(6), (2, 3), (1, 2))
    counter = 0
    for i in range(2):
        for j in range(3):
            x[i, j] = counter
            counter += 1
    check(x)

    # more dims: a channels-last 4-D tensor
    N, C, H, W, = 2, 3, 4, 5
    check(torch.rand(N, C, H, W).to(memory_format=torch.channels_last))
def test_alias_analysis_module(self):
    """Scripted and eager modules must agree even after two attributes are aliased."""
    class AliasModule(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            torch.manual_seed(1337)
            self.a = torch.randn(128, 128)
            self.b = torch.randn(128, 128)
            self.c = torch.randn(128, 128)

        def forward(self, x, y, z):
            z = z + self.a
            # in-place update of self.b: if a aliases b this also changes a
            self.b.add_(y)
            w = z + self.a
            z = w + x
            return z

    x = torch.randn(128, 128)

    def getModule(script):
        am = AliasModule()
        if script:
            return torch.jit.script(am)
        return am

    am = getModule(False)
    am_s = getModule(True)
    ref = am(x, x, x)
    test = am_s(x, x, x)
    torch.testing.assert_close(ref, test)

    # Now do the aliasing: a and b refer to the same tensor in both modules
    am.a = am.b
    ref = am(x, x, x)
    am_s.a = am_s.b
    test = am_s(x, x, x)
    torch.testing.assert_close(ref, test)
def test_alias_analysis_inputs(self):
    """Scripted and eager must agree when all three inputs alias the same tensor."""
    class AliasModule(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            torch.manual_seed(1337)
            self.a = torch.randn(128, 128)
            self.b = torch.randn(128, 128)
            self.c = torch.randn(128, 128)

        def forward(self, x, y, z):
            # x, y, z are the same tensor in this test, so this in-place
            # add also changes y and z
            x.add_(y)
            w = z + self.a
            z = w + x
            return z

    def getModule(script):
        am = AliasModule()
        if script:
            return torch.jit.script(am)
        return am

    am = getModule(False)
    am_s = getModule(True)
    # reseed and rebuild x before each run: forward mutates its input
    torch.manual_seed(1337)
    x = torch.randn(128, 128)
    ref = am(x, x, x)
    torch.manual_seed(1337)
    x = torch.randn(128, 128)
    test = am_s(x, x, x)
    torch.testing.assert_close(ref, test)
def test_alias_analysis_input_and_module(self):
    """Scripted and eager must agree when an input aliases a module attribute."""
    class AliasModule(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            torch.manual_seed(1337)
            self.a = torch.randn(128, 128)
            self.b = torch.randn(128, 128)
            self.c = torch.randn(128, 128)

        def forward(self, x, y, z):
            # self.b aliases x here, so mutating x also changes self.b
            x.add_(y)
            w = z + self.b
            z = w + x
            return z

    def getModule(script):
        am = AliasModule()
        if script:
            return torch.jit.script(am)
        return am

    am = getModule(False)
    am_s = getModule(True)
    # reseed and rebuild x per run (forward mutates it), then alias it
    # into the module before calling
    torch.manual_seed(1337)
    x = torch.randn(128, 128)
    am.b = x
    ref = am(x, x, x)
    torch.manual_seed(1337)
    x = torch.randn(128, 128)
    am_s.b = x
    test = am_s(x, x, x)
    torch.testing.assert_close(ref, test)
def test_multiple_outputs(self):
    """Multi-output fusion with a strided input matches eager (cf. #48533)."""
    for device in self.devices:
        # A bug reported internally similar to the one reported in #48533
        def foo(a, b, c):
            t_next = c + 1
            t5 = t_next * b
            t6 = torch.unsqueeze(t_next, 1)
            t7 = a * t6
            return (t7, t5, t_next)

        for data_type in self.dtypes:
            dense = torch.rand(20, 20, dtype=data_type, device=device)
            strided = torch.rand(20 * 29, dtype=data_type, device=device).as_strided([20], [29])
            counts = torch.ones(20, dtype=torch.int64, device=device)
            traced = torch.jit.trace(foo, (dense, strided, counts))
            expected = foo(dense, strided, counts)
            traced(dense, strided, counts)            # profiling run
            actual = traced(dense, strided, counts)   # optimized run
            self.assertEqual(expected, actual)
def test_propagated_mem_layout(self):
    """Fused output memory layouts match eager across formats, shapes, and permutes."""
    def foo(a, b, c):
        t_next = c + 1
        t5 = t_next * b
        t7 = a * t5
        return t7

    def foo_multi_outputs(a, b, c):
        t_next = c + 1
        t5 = b * t_next
        t7 = a * t5
        return (t7, t5, t_next)

    def foo_multi_outputs_i_nhwc_o_nchw(a, b, c):
        t_next = c + 1
        t5 = b * t_next
        t7 = a * t5
        t8 = t7.to(memory_format=torch.contiguous_format)
        return (t8, t7, t5, t_next)

    def run_foo_case(foo, a, b, c):
        traced_contiguous = torch.jit.trace(foo, (a, b, c))
        ref = foo(a, b, c)
        traced_contiguous(a, b, c)          # profiling run
        exp = traced_contiguous(a, b, c)    # optimized run
        self.assertEqual(ref, exp)

    mem_layouts = list(itertools.product([torch.contiguous_format, torch.channels_last], repeat=3))
    shapes = [(2, 3, 4, 5), (2, 1, 1, 5), (1, 1, 1, 1)]
    permutes = [(0, 3, 2, 1), (0, 3, 1, 2)]
    funcs = [foo, foo_multi_outputs, foo_multi_outputs_i_nhwc_o_nchw]
    # BUG FIX: materialize the config list. The previous bare
    # itertools.product iterator was fully consumed by the first
    # ("STATIC") fusion strategy, so the "DYNAMIC" pass tested nothing.
    configs = list(itertools.product(funcs, shapes, mem_layouts, permutes))
    for strategy in ["STATIC", "DYNAMIC"]:
        old_strategy = torch.jit.set_fusion_strategy([(strategy, 10)])
        for _func, _shape, _mem_layouts, _permute in configs:
            a = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[0])
            b = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[1])
            c = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[2])
            run_foo_case(_func, a, b, c)

            a = a.permute(dims=_permute)
            b = b.permute(dims=_permute)
            c = c.permute(dims=_permute)
            run_foo_case(_func, a, b, c)
        torch.jit.set_fusion_strategy(old_strategy)
if __name__ == '__main__':
    run_tests()
import collections
import glob
import os
import re
import sys
import traceback
# Preserve loader state across re-executions of this module: reload()
# re-evals core scripts (including this one) in the same globals, so the
# modification-time cache and file set must survive a re-exec.
if 'mtimes' not in globals():
    mtimes = {}

if 'lastfiles' not in globals():
    lastfiles = set()
def make_signature(f):
    """Return a (filename, function name, first line number) triple for *f*."""
    code = f.func_code
    return code.co_filename, f.func_name, code.co_firstlineno
def format_plug(plug, kind='', lpad=0, width=50):
    """Render one plugin registration as a single aligned line.

    *plug* is a (function, argument-dict) pair. *kind* selects which extra
    detail to append (command name, event list, or regex), *lpad* indents
    the line, and *width* is the column where the detail starts.

    BUG FIX: *width* used to be a dead parameter (default 40) while the
    padding was hard-coded to 50; the default now matches the actual
    behaviour and the parameter is honoured.
    """
    out = ' ' * lpad + '%s:%s:%s' % make_signature(plug[0])
    if kind == 'command':
        out += ' ' * (width - len(out)) + plug[1]['name']
    if kind == 'event':
        out += ' ' * (width - len(out)) + ', '.join(plug[1]['events'])
    if kind == 'regex':
        out += ' ' * (width - len(out)) + plug[1]['regex']
    return out
def reload(init=False):
    """(Re)load core scripts and plugins whose files changed on disk.

    With init=True the plugin registries are created from scratch and a
    listing is printed; a syntax error in a core script aborts startup.
    Core scripts are exec'd into this module's globals; plugins are exec'd
    into private namespaces and registered via their `_hook` attributes.
    """
    changed = False
    if init:
        bot.plugs = collections.defaultdict(list)
        bot.threads = {}

    core_fileset = set(glob.glob(os.path.join("core", "*.py")))

    for filename in core_fileset:
        mtime = os.stat(filename).st_mtime
        if mtime != mtimes.get(filename):
            mtimes[filename] = mtime
            changed = True
            try:
                # core scripts run directly in this module's globals
                eval(compile(open(filename, 'U').read(), filename, 'exec'),
                     globals())
            except Exception:
                traceback.print_exc()
                if init:        # stop if there's an error (syntax?) in a core
                    sys.exit()  # script on startup
                continue

            if filename == os.path.join('core', 'reload.py'):
                # this very file was re-exec'd above; restart with the new
                # definition of reload() and let it finish the job
                reload(init=init)
                return

    fileset = set(glob.glob(os.path.join('plugins', '*.py')))

    # remove deleted/moved plugins
    for name, data in bot.plugs.iteritems():
        bot.plugs[name] = [x for x in data if x[0]._filename in fileset]

    for filename in list(mtimes):
        if filename not in fileset and filename not in core_fileset:
            mtimes.pop(filename)

    for func, handler in list(bot.threads.iteritems()):
        if func._filename not in fileset:
            handler.stop()
            del bot.threads[func]

    # compile new plugins
    for filename in fileset:
        mtime = os.stat(filename).st_mtime
        if mtime != mtimes.get(filename):
            mtimes[filename] = mtime
            changed = True
            try:
                # plugins get an isolated namespace, unlike core scripts
                code = compile(open(filename, 'U').read(), filename, 'exec')
                namespace = {}
                eval(code, namespace)
            except Exception:
                traceback.print_exc()
                continue

            # remove plugins already loaded from this filename
            for name, data in bot.plugs.iteritems():
                bot.plugs[name] = [x for x in data
                                   if x[0]._filename != filename]

            for func, handler in list(bot.threads.iteritems()):
                if func._filename == filename:
                    handler.stop()
                    del bot.threads[func]

            for obj in namespace.itervalues():
                if hasattr(obj, '_hook'):  # check for magic
                    if obj._thread:
                        bot.threads[obj] = Handler(obj)
                    for type, data in obj._hook:
                        bot.plugs[type] += [data]
                        if not init:
                            print '### new plugin (type: %s) loaded:' % \
                                type, format_plug(data)

    if changed:
        # rebuild the command and event dispatch tables from bot.plugs
        bot.commands = {}
        for plug in bot.plugs['command']:
            name = plug[1]['name'].lower()
            if not re.match(r'^\w+$', name):
                print '### ERROR: invalid command name "%s" (%s)' % (name,
                    format_plug(plug))
                continue
            if name in bot.commands:
                print "### ERROR: command '%s' already registered (%s, %s)" % \
                    (name, format_plug(bot.commands[name]),
                     format_plug(plug))
                continue
            bot.commands[name] = plug

        bot.events = collections.defaultdict(list)
        for func, args in bot.plugs['event']:
            for event in args['events']:
                bot.events[event].append((func, args))

    if init:
        print '  plugin listing:'

        if bot.commands:
            # hack to make commands with multiple aliases
            # print nicely
            print '  command:'
            commands = collections.defaultdict(list)
            for name, (func, args) in bot.commands.iteritems():
                commands[make_signature(func)].append(name)

            for sig, names in sorted(commands.iteritems()):
                names.sort(key=lambda x: (-len(x), x))  # long names first
                out = ' ' * 6 + '%s:%s:%s' % sig
                out += ' ' * (50 - len(out)) + ', '.join(names)
                print out

        for kind, plugs in sorted(bot.plugs.iteritems()):
            if kind == 'command':
                continue
            print '  %s:' % kind
            for plug in plugs:
                print format_plug(plug, kind=kind, lpad=6)
        print
import type { LVal, Node, TSType } from '@babel/types'
import type { ScriptCompileContext } from './context'
import { inferRuntimeType } from './resolveType'
import { UNKNOWN_TYPE, isCallOf, toRuntimeTypeString } from './utils'
import { BindingTypes, unwrapTSNode } from '@vue/compiler-dom'
export const DEFINE_MODEL = 'defineModel'
/**
 * Parsed information about a single `defineModel()` declaration,
 * collected by `processDefineModel` and consumed by `genModelProps`.
 */
export interface ModelDecl {
  /** Type argument of `defineModel<T>()`, if one was given. */
  type: TSType | undefined
  /** Source text of the prop options, with runtime-only get/set removed. */
  options: string | undefined
  /** Name of the variable the call result was assigned to, if any. */
  identifier: string | undefined
  /** Prop-option AST nodes recorded for invalid scope-reference checks. */
  runtimeOptionNodes: Node[]
}
/**
 * Compile a `defineModel()` macro call into a `useModel()` runtime call.
 *
 * Records the model declaration on the context (for later prop codegen),
 * splits the options object into prop options vs. runtime options via
 * in-place MagicString edits, and rewrites the callee/arguments.
 *
 * Returns false (untouched) when `node` is not a `defineModel` call.
 */
export function processDefineModel(
  ctx: ScriptCompileContext,
  node: Node,
  declId?: LVal,
): boolean {
  if (!isCallOf(node, DEFINE_MODEL)) {
    return false
  }

  ctx.hasDefineModelCall = true

  const type =
    (node.typeParameters && node.typeParameters.params[0]) || undefined
  let modelName: string
  let options: Node | undefined
  // first arg may be the model name (string literal) or the options object
  const arg0 = node.arguments[0] && unwrapTSNode(node.arguments[0])
  const hasName = arg0 && arg0.type === 'StringLiteral'
  if (hasName) {
    modelName = arg0.value
    options = node.arguments[1]
  } else {
    modelName = 'modelValue'
    options = arg0
  }

  if (ctx.modelDecls[modelName]) {
    ctx.error(`duplicate model name ${JSON.stringify(modelName)}`, node)
  }

  let optionsString = options && ctx.getString(options)
  let optionsRemoved = !options
  const runtimeOptionNodes: Node[] = []

  // only a statically analyzable object literal can be split
  if (
    options &&
    options.type === 'ObjectExpression' &&
    !options.properties.some(p => p.type === 'SpreadElement' || p.computed)
  ) {
    let removed = 0
    // iterate backwards so earlier offsets stay valid while removing
    for (let i = options.properties.length - 1; i >= 0; i--) {
      const p = options.properties[i]
      const next = options.properties[i + 1]
      const start = p.start!
      const end = next ? next.start! : options.end! - 1
      if (
        (p.type === 'ObjectProperty' || p.type === 'ObjectMethod') &&
        ((p.key.type === 'Identifier' &&
          (p.key.name === 'get' || p.key.name === 'set')) ||
          (p.key.type === 'StringLiteral' &&
            (p.key.value === 'get' || p.key.value === 'set')))
      ) {
        // remove runtime-only options from prop options to avoid duplicates
        optionsString =
          optionsString.slice(0, start - options.start!) +
          optionsString.slice(end - options.start!)
      } else {
        // remove prop options from runtime options
        removed++
        ctx.s.remove(ctx.startOffset! + start, ctx.startOffset! + end)
        // record prop options for invalid scope var reference check
        runtimeOptionNodes.push(p)
      }
    }
    if (removed === options.properties.length) {
      // every property was a prop option: drop the now-empty object
      // argument (and the preceding comma when a name argument exists)
      optionsRemoved = true
      ctx.s.remove(
        ctx.startOffset! + (hasName ? arg0.end! : options.start!),
        ctx.startOffset! + options.end!,
      )
    }
  }

  ctx.modelDecls[modelName] = {
    type,
    options: optionsString,
    runtimeOptionNodes,
    identifier:
      declId && declId.type === 'Identifier' ? declId.name : undefined,
  }
  // register binding type
  ctx.bindingMetadata[modelName] = BindingTypes.PROPS

  // defineModel -> useModel
  ctx.s.overwrite(
    ctx.startOffset! + node.callee.start!,
    ctx.startOffset! + node.callee.end!,
    ctx.helper('useModel'),
  )
  // inject arguments
  ctx.s.appendLeft(
    ctx.startOffset! +
      (node.arguments.length ? node.arguments[0].start! : node.end! - 1),
    `__props, ` +
      (hasName
        ? ``
        : `${JSON.stringify(modelName)}${optionsRemoved ? `` : `, `}`),
  )
  return true
}
/**
 * Generate the props object source for all recorded `defineModel()` calls.
 *
 * For each model, merges the inferred runtime type (dev only, or when it
 * matters for coercion) with the user's remaining prop options, and also
 * emits the companion `<name>Modifiers` prop. Returns undefined when no
 * `defineModel` call was seen.
 */
export function genModelProps(ctx: ScriptCompileContext): string | undefined {
  if (!ctx.hasDefineModelCall) return

  const isProd = !!ctx.options.isProd
  let modelPropsDecl = ''
  for (const [name, { type, options: runtimeOptions }] of Object.entries(
    ctx.modelDecls,
  )) {
    let skipCheck = false
    let codegenOptions = ``
    let runtimeTypes = type && inferRuntimeType(ctx, type)
    if (runtimeTypes) {
      const hasBoolean = runtimeTypes.includes('Boolean')
      const hasFunction = runtimeTypes.includes('Function')
      const hasUnknownType = runtimeTypes.includes(UNKNOWN_TYPE)

      if (hasUnknownType) {
        if (hasBoolean || hasFunction) {
          // keep the known types but skip runtime validation, since the
          // full union could not be resolved
          runtimeTypes = runtimeTypes.filter(t => t !== UNKNOWN_TYPE)
          skipCheck = true
        } else {
          runtimeTypes = ['null']
        }
      }

      if (!isProd) {
        codegenOptions =
          `type: ${toRuntimeTypeString(runtimeTypes)}` +
          (skipCheck ? ', skipCheck: true' : '')
      } else if (hasBoolean || (runtimeOptions && hasFunction)) {
        // preserve types if contains boolean, or
        // function w/ runtime options that may contain default
        codegenOptions = `type: ${toRuntimeTypeString(runtimeTypes)}`
      } else {
        // able to drop types in production
      }
    }

    let decl: string
    if (codegenOptions && runtimeOptions) {
      // spread in TS output, Object.assign for plain JS
      decl = ctx.isTS
        ? `{ ${codegenOptions}, ...${runtimeOptions} }`
        : `Object.assign({ ${codegenOptions} }, ${runtimeOptions})`
    } else if (codegenOptions) {
      decl = `{ ${codegenOptions} }`
    } else if (runtimeOptions) {
      decl = runtimeOptions
    } else {
      decl = `{}`
    }
    modelPropsDecl += `\n    ${JSON.stringify(name)}: ${decl},`

    // also generate modifiers prop
    const modifierPropName = JSON.stringify(
      name === 'modelValue' ? `modelModifiers` : `${name}Modifiers`,
    )
    modelPropsDecl += `\n    ${modifierPropName}: {},`
  }
  return `{${modelPropsDecl}\n  }`
}
"""
Core methods
------------
.. autofunction:: cache_toolbox.core.get_instance
.. autofunction:: cache_toolbox.core.delete_instance
.. autofunction:: cache_toolbox.core.instance_key
"""
from django.core.cache import cache
from django.db import DEFAULT_DB_ALIAS
from opaque_keys import InvalidKeyError
from . import app_settings
def get_instance(model, instance_or_pk, timeout=None, using=None):
    """
    Returns the ``model`` instance with a primary key of ``instance_or_pk``.

    If the data is cached it will be returned from there, otherwise the regular
    Django ORM is queried for this instance and the data stored in the cache.

    If omitted, the timeout value defaults to
    ``settings.CACHE_TOOLBOX_DEFAULT_TIMEOUT`` instead of 0 (zero).

    Example::

        >>> get_instance(User, 1) # Cache miss
        <User: lamby>
        >>> get_instance(User, 1) # Cache hit
        <User: lamby>
        >>> User.objects.get(pk=1) == get_instance(User, 1)
        True
    """
    pk = getattr(instance_or_pk, 'pk', instance_or_pk)
    key = instance_key(model, instance_or_pk)
    data = cache.get(key)

    if data is not None:
        try:
            # Try and construct instance from dictionary
            instance = model(pk=pk, **data)

            # Ensure instance knows that it already exists in the database,
            # otherwise we will fail any uniqueness checks when saving the
            # instance.
            instance._state.adding = False

            # Specify database so that instance is setup correctly. We don't
            # namespace cached objects by their origin database, however.
            instance._state.db = using or DEFAULT_DB_ALIAS

            return instance
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt; only deserialisation failures belong here.
        except Exception:
            # Error when deserialising - remove from the cache; we will
            # fallback and return the underlying instance
            cache.delete(key)

    # Use the default manager so we are never filtered by a .get_query_set()
    instance = model._default_manager.using(using).get(pk=pk)

    data = {}
    for field in instance._meta.fields:
        # Harmless to save, but saves space in the dictionary - we already know
        # the primary key when we lookup
        if field.primary_key:
            continue

        if field.get_internal_type() == 'FileField':
            # Avoid problems with serializing FileFields
            # by only serializing the file name
            file = getattr(instance, field.attname)
            data[field.attname] = file.name
        else:
            data[field.attname] = getattr(instance, field.attname)

    if timeout is None:
        timeout = app_settings.CACHE_TOOLBOX_DEFAULT_TIMEOUT

    cache.set(key, data, timeout)
    return instance
def delete_instance(model, *instance_or_pk):
    """
    Remove the cached copies of each given instance (or primary key) of
    ``model`` in a single round trip.
    """
    keys = [instance_key(model, obj) for obj in instance_or_pk]
    cache.delete_many(keys)
def instance_key(model, instance_or_pk):
    """
    Return the cache key for this (model, instance) pair.

    ``instance_or_pk`` may be either a model instance (its ``pk`` attribute
    is used) or a raw primary-key value.
    """
    # Use %s rather than %d so that non-integer primary keys (e.g. UUID or
    # character fields) are supported; integer pks format identically.
    return '%s.%s:%s' % (
        model._meta.app_label,
        model._meta.module_name,
        getattr(instance_or_pk, 'pk', instance_or_pk),
    )
def set_cached_content(content):
    """Cache ``content`` under the UTF-8 encoding of its location."""
    cache_key = unicode(content.location).encode("utf-8")
    cache.set(cache_key, content)
def get_cached_content(location):
    """Look up cached content by the UTF-8 encoding of ``location``."""
    cache_key = unicode(location).encode("utf-8")
    return cache.get(cache_key)
def del_cached_content(location):
    """
    Delete content for the given location, as well as for content with
    run=None.

    It's possible that the content could have been cached without knowing the
    course_key - and so without having the run.
    """
    def _as_key(loc):
        return unicode(loc).encode("utf-8")

    keys = [_as_key(location)]
    try:
        keys.append(_as_key(location.replace(run=None)))
    except InvalidKeyError:
        # although deprecated keys allowed run=None, new keys don't if there
        # is no version.
        pass
    cache.delete_many(keys)
---
applies_to:
stack: ga
serverless: ga
navigation_title: Query multiple sources
---
# Query multiple indices or clusters with {{esql}}
{{esql}} allows you to query across multiple indices or clusters. Learn more in the following sections:
* [Query multiple indices](esql-multi-index.md)
* [Query across clusters](esql-cross-clusters.md) | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/esql/esql-multi.md |
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.test.json;
import org.assertj.core.api.AssertProvider;
import org.jspecify.annotations.Nullable;
import org.springframework.core.ResolvableType;
import org.springframework.util.Assert;
/**
* Object content usually created from {@link AbstractJsonMarshalTester}. Generally used
* only to {@link AssertProvider provide} {@link ObjectContentAssert} to AssertJ
* {@code assertThat} calls.
*
* @param <T> the content type
* @author Phillip Webb
* @since 1.4.0
*/
public final class ObjectContent<T> implements AssertProvider<ObjectContentAssert<T>> {
private final @Nullable ResolvableType type;
private final T object;
/**
* Create a new {@link ObjectContent} instance.
* @param type the type under test (or {@code null} if not known)
* @param object the actual object content
*/
public ObjectContent(@Nullable ResolvableType type, T object) {
Assert.notNull(object, "'object' must not be null");
this.type = type;
this.object = object;
}
@Override
public ObjectContentAssert<T> assertThat() {
return new ObjectContentAssert<>(this.object);
}
/**
* Return the actual object content.
* @return the object content
*/
public T getObject() {
return this.object;
}
@Override
public String toString() {
String createdFrom = (this.type != null) ? " created from " + this.type : "";
return "ObjectContent " + this.object + createdFrom;
}
} | java | github | https://github.com/spring-projects/spring-boot | core/spring-boot-test/src/main/java/org/springframework/boot/test/json/ObjectContent.java |
# -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import unicode_literals
import importlib
import inspect
import os
import re
import sys
import tempfile
from unittest import skipIf
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import reverse
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import SimpleLazyObject
from django.views.debug import (
CallableSettingWrapper, ExceptionReporter, technical_500_response,
)
from .. import BrokenException, except_args
from ..views import (
custom_exception_reporter_filter_view, multivalue_dict_key_error,
non_sensitive_view, paranoid_view, sensitive_args_function_caller,
sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view,
)
if six.PY3:
from .py3_test_debug import Py3ExceptionReporterTests # NOQA
class User(object):
    """Minimal stand-in user whose string form is a fixed username."""

    def __str__(self):
        return 'jacob'
class CallableSettingWrapperTests(SimpleTestCase):
    """Unit tests for CallableSettingWrapper."""

    def test_repr(self):
        # The wrapper must delegate repr() to the wrapped callable.
        class WrappedCallable(object):
            def __call__(self):
                pass

            def __repr__(self):
                return "repr from the wrapped callable"

        wrapped = CallableSettingWrapper(WrappedCallable())
        self.assertEqual(repr(wrapped), "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
    """Tests for the technical (DEBUG=True) 400/403/404/500 error pages."""

    def test_files(self):
        """Uploaded file names appear on the debug page; file contents do not."""
        response = self.client.get('/raises/')
        self.assertEqual(response.status_code, 500)
        data = {
            'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
        }
        response = self.client.post('/raises/', data)
        self.assertContains(response, 'file_data.txt', status_code=500)
        self.assertNotContains(response, 'haha', status_code=500)

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    # Ensure no 403.html template exists to test the default case.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    def test_403(self):
        """Without a 403.html template, the default plain 403 body is used."""
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    # Set up a test 403.html template.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '403.html': 'This is a test template for a 403 error ({{ exception }}).',
                }),
            ],
        },
    }])
    def test_403_template(self):
        """A custom 403.html is rendered and receives the exception in context."""
        response = self.client.get('/raises403/')
        self.assertContains(response, 'test template', status_code=403)
        self.assertContains(response, '(Insufficient Permissions).', status_code=403)

    def test_404(self):
        """A view raising Http404 yields a 404 status."""
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_raised_404(self):
        """An explicitly raised Http404 renders the technical 404 page."""
        response = self.client.get('/views/raises404/')
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_404_not_in_urls(self):
        """A URL matching no pattern shows the pattern list but no "Raised by" view."""
        response = self.client.get('/not-in-urls')
        self.assertNotContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_technical_404(self):
        """The technical 404 page names the function-based view that raised Http404."""
        response = self.client.get('/views/technical404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.technical404", status_code=404)

    def test_classbased_technical_404(self):
        """The technical 404 page also names class-based views."""
        response = self.client.get('/views/classbased404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.Http404View", status_code=404)

    def test_view_exceptions(self):
        """Each broken view propagates its BrokenException to the test client."""
        for n in range(len(except_args)):
            with self.assertRaises(BrokenException):
                self.client.get(reverse('view_exception', args=(n,)))

    def test_non_l10ned_numeric_ids(self):
        """
        Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
        """
        with self.settings(DEBUG=True, USE_L10N=True):
            response = self.client.get('/raises500/')
            # We look for a HTML fragment of the form
            # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
            self.assertContains(response, '<div class="context" id="', status_code=500)
            match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
            self.assertIsNotNone(match)
            id_repr = match.group('id')
            self.assertFalse(re.search(b'[^c0-9]', id_repr),
                "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr)

    def test_template_exceptions(self):
        """The last traceback frame points at the template-level raise site."""
        for n in range(len(except_args)):
            try:
                self.client.get(reverse('template_exception', args=(n,)))
            except Exception:
                raising_loc = inspect.trace()[-1][-2][0].strip()
                self.assertNotEqual(raising_loc.find('raise BrokenException'), -1,
                    "Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
                    raising_loc)

    def test_template_loader_postmortem(self):
        """Tests for not existing file"""
        template_name = "notfound.html"
        with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
            tempdir = os.path.dirname(tmpfile.name)
            template_path = os.path.join(tempdir, template_name)
            with override_settings(TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [tempdir],
            }]):
                response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
                self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)

    def test_no_template_source_loaders(self):
        """
        Make sure if you don't specify a template, the debug view doesn't blow up.
        """
        with self.assertRaises(TemplateDoesNotExist):
            self.client.get('/render_no_template/')

    @override_settings(ROOT_URLCONF='view_tests.default_urls')
    def test_default_urlconf_template(self):
        """
        Make sure that the default URLconf template is shown instead
        of the technical 404 page, if the user has not altered their
        URLconf yet.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "<h2>Congratulations on your first Django-powered page.</h2>"
        )

    @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
    def test_regression_21530(self):
        """
        Regression test for bug #21530.
        If the admin app include is replaced with exactly one url
        pattern, then the technical 404 template should be displayed.
        The bug here was that an AttributeError caused a 500 response.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "Page not found <span>(404)</span>",
            status_code=404
        )
class DebugViewQueriesAllowedTests(SimpleTestCase):
    # May need a query to initialize MySQL connection
    allow_database_queries = True

    def test_handle_db_exception(self):
        """
        The debug view works when rendering a database exception that was
        produced by executing an invalid query.
        """
        with connection.cursor() as cursor:
            try:
                cursor.execute('INVALID SQL')
            except DatabaseError:
                exc_info = sys.exc_info()
        request = RequestFactory().get('/')
        response = technical_500_response(request, *exc_info)
        self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
    DEBUG=True,
    ROOT_URLCONF="view_tests.urls",
    # No template directories are configured, so no templates will be found.
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
    """The debug error views work when the configured backend is not Django's."""

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    def test_403(self):
        """The default plain 403 body is served with a non-Django backend."""
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    def test_404(self):
        """Http404 still yields a 404 status with a non-Django backend."""
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_template_not_found_error(self):
        # Raises a TemplateDoesNotExist exception and shows the debug view.
        url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
        response = self.client.get(url)
        self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
    """Checks the HTML reports produced by ExceptionReporter.get_traceback_html()."""

    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<p>jacob</p>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_eol_support(self):
        """Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
        LINES = list('print %d' % i for i in range(1, 6))
        reporter = ExceptionReporter(None, None, None, None)
        for newline in ['\n', '\r\n', '\r']:
            fd, filename = tempfile.mkstemp(text=False)
            os.write(fd, force_bytes(newline.join(LINES) + newline))
            os.close(fd)
            try:
                self.assertEqual(
                    reporter._get_lines_from_file(filename, 3, 2),
                    (1, LINES[1:3], LINES[3], LINES[4:])
                )
            finally:
                os.unlink(filename)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_message_only(self):
        """A message alone (no request, no exception) still renders a report."""
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_non_utf8_values_handling(self):
        "Non-UTF-8 exceptions/values should not make the output generation choke."
        try:
            class NonUtf8Output(Exception):
                def __repr__(self):
                    return b'EXC\xe9EXC'
            somevar = b'VAL\xe9VAL'  # NOQA
            raise NonUtf8Output()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('VAL\\xe9VAL', html)
        self.assertIn('EXC\\xe9EXC', html)

    def test_unprintable_values_handling(self):
        "Unprintable values should not make the output generation choke."
        try:
            class OomOutput(object):
                def __repr__(self):
                    raise MemoryError('OOM')
            oomvalue = OomOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<td class="code"><pre>Error in formatting', html)

    def test_too_large_values_handling(self):
        "Large values should not create a large HTML."
        large = 256 * 1024
        repr_of_str_adds = len(repr(''))
        try:
            class LargeOutput(object):
                def __repr__(self):
                    return repr('A' * large)
            largevalue = LargeOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fit in 128Kb
        self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)

    @skipIf(six.PY2, 'Bug manifests on PY3 only')
    def test_unfrozen_importlib(self):
        """
        importlib is not a frozen app, but its loader thinks it's frozen which
        results in an ImportError on Python 3. Refs #21443.
        """
        try:
            request = self.rf.get('/test_view/')
            importlib.import_module('abc.def.invalid.name')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ImportError at /test_view/</h1>', html)

    def test_ignore_traceback_evaluation_exceptions(self):
        """
        Don't trip over exceptions generated by crafted objects when
        evaluating them while cleansing (#24455).
        """
        class BrokenEvaluation(Exception):
            pass

        def broken_setup():
            raise BrokenEvaluation

        request = self.rf.get('/test_view/')
        broken_lazy = SimpleLazyObject(broken_setup)
        try:
            bool(broken_lazy)
        except BrokenEvaluation:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        try:
            html = reporter.get_traceback_html()
        except BrokenEvaluation:
            self.fail("Broken evaluation in traceback is not caught.")
        self.assertIn(
            "BrokenEvaluation",
            html,
            "Evaluation exception reason not mentioned in traceback"
        )

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn("http://evil.com/", html)
class PlainTextReportTests(SimpleTestCase):
    """Checks the plain-text reports produced by ExceptionReporter.get_traceback_text()."""

    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError at /test_view/', text)
        self.assertIn("Can't find my keys", text)
        self.assertIn('Request Method:', text)
        self.assertIn('Request URL:', text)
        self.assertIn('USER: jacob', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request information:', text)
        self.assertNotIn('Request data not supplied', text)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError', text)
        self.assertIn("Can't find my keys", text)
        self.assertNotIn('Request Method:', text)
        self.assertNotIn('Request URL:', text)
        self.assertNotIn('USER:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request data not supplied', text)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        # Only checks that rendering does not raise.
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        reporter.get_traceback_text()

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        # Only checks that rendering does not raise.
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    def test_message_only(self):
        """A message alone renders without raising."""
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin(object):
    """Shared assertion helpers for checking which data leaks into error reports."""

    # Mixin used in the ExceptionReporterFilterTests and
    # AjaxResponseExceptionReporterFilter tests below
    # POST payload; 'sausage' and 'bacon' play the role of sensitive parameters.
    breakfast_data = {'sausage-key': 'sausage-value',
                      'baked-beans-key': 'baked-beans-value',
                      'hash-brown-key': 'hash-brown-value',
                      'bacon-key': 'bacon-value'}

    def verify_unsafe_response(self, view, check_for_vars=True,
                               check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # All variables are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters are shown.
                self.assertContains(response, k, status_code=500)
                self.assertContains(response, v, status_code=500)

    def verify_safe_response(self, view, check_for_vars=True,
                             check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Non-sensitive variable's name and value are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            # Sensitive variable's name is shown but not its value.
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
            # Non-sensitive POST parameters' values are shown.
            self.assertContains(response, 'baked-beans-value', status_code=500)
            self.assertContains(response, 'hash-brown-value', status_code=500)
            # Sensitive POST parameters' values are not shown.
            self.assertNotContains(response, 'sausage-value', status_code=500)
            self.assertNotContains(response, 'bacon-value', status_code=500)

    def verify_paranoid_response(self, view, check_for_vars=True,
                                 check_for_POST_params=True):
        """
        Asserts that no variables or POST parameters are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertNotContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(response, v, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertNotIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters' names are shown.
                    self.assertIn(k, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn('baked-beans-value', body_plain)
                self.assertIn('hash-brown-value', body_plain)
                self.assertIn('baked-beans-value', body_html)
                self.assertIn('hash-brown-value', body_html)
                # Sensitive POST parameters' values are not shown.
                self.assertNotIn('sausage-value', body_plain)
                self.assertNotIn('bacon-value', body_plain)
                self.assertNotIn('sausage-value', body_html)
                self.assertNotIn('bacon-value', body_html)

    def verify_paranoid_email(self, view):
        """
        Asserts that no variables or POST parameters are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body = force_text(email.body)
            self.assertNotIn('cooked_eggs', body)
            self.assertNotIn('scrambled', body)
            self.assertNotIn('sauce', body)
            self.assertNotIn('worcestershire', body)
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(k, body)
                # No POST parameters' values are shown.
                self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Ensure that sensitive information can be filtered out of error reports.
Refs #14614.
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
Ensure that everything (request info and frame variables) can bee seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Ensure that sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
Ensure that no POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Ensure that sensitive POST parameters cannot be seen in the
error reports for if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
Ensure that the sensitive_variables decorator works with object
methods.
Refs #18379.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view,
check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view,
check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Ensure that sensitive variables don't leak in the sensitive_variables
decorator's frame, when those variables are passed as arguments to the
decorated function.
Refs #19453.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Ensure that sensitive variables don't leak in the sensitive_variables
decorator's frame, when those variables are passed as keyword arguments
to the decorated function.
Refs #19453.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
Callable settings which forbid to set attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots(object):
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Ensure that sensitive information can be filtered out of error reports.

    Here we specifically test the plain text 500 debug-only error page served
    when it has been detected the request was sent by JS code. We don't check
    for (non)existence of frames vars in the traceback information section of
    the response content because we don't include them in these error pages.
    Refs #14614.
    """
    # Every request built through this factory carries the header that marks
    # it as an AJAX (XMLHttpRequest) request.
    rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    def test_non_sensitive_request(self):
        """
        Ensure that request info can be seen in the default error reports for
        non-sensitive requests.
        """
        # check_for_vars=False: these AJAX error pages include no frame
        # variables (see the class docstring).
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Ensure that sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
Ensure that no POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view,
check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view,
check_for_vars=False) | unknown | codeparrot/codeparrot-clean | ||
# This file is distributed under the same license as the Django package.
#
# Locale format definitions. (The 'kello' literal in DATETIME_FORMAT is
# Finnish for "o'clock", so presumably the Finnish locale -- confirm
# against the containing locale directory.)
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = r'j. E Y \k\e\l\l\o G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.n.Y'
SHORT_DATETIME_FORMAT = 'j.n.Y G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', # '20.3.2014'
    '%d.%m.%y', # '20.3.14'
]
# Datetime inputs are tried most-specific first (seconds+microseconds
# down to date-only), with 4-digit years before 2-digit years.
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H.%M.%S', # '20.3.2014 14.30.59'
    '%d.%m.%Y %H.%M.%S.%f', # '20.3.2014 14.30.59.000200'
    '%d.%m.%Y %H.%M', # '20.3.2014 14.30'
    '%d.%m.%Y', # '20.3.2014'
    '%d.%m.%y %H.%M.%S', # '20.3.14 14.30.59'
    '%d.%m.%y %H.%M.%S.%f', # '20.3.14 14.30.59.000200'
    '%d.%m.%y %H.%M', # '20.3.14 14.30'
    '%d.%m.%y', # '20.3.14'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S', # '14.30.59'
    '%H.%M.%S.%f', # '14.30.59.000200'
    '%H.%M', # '14.30'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # Non-breaking space
NUMBER_GROUPING = 3
from django.contrib.admindocs import views
from django.urls import path, re_path
# URLconf for the admindocs application. All route names carry the
# "django-admindocs-" prefix so they can be reversed unambiguously.
urlpatterns = [
    path(
        "",
        views.BaseAdminDocsView.as_view(template_name="admin_doc/index.html"),
        name="django-admindocs-docroot",
    ),
    path(
        "bookmarklets/",
        views.BookmarkletsView.as_view(),
        name="django-admindocs-bookmarklets",
    ),
    path(
        "tags/",
        views.TemplateTagIndexView.as_view(),
        name="django-admindocs-tags",
    ),
    path(
        "filters/",
        views.TemplateFilterIndexView.as_view(),
        name="django-admindocs-filters",
    ),
    path(
        "views/",
        views.ViewIndexView.as_view(),
        name="django-admindocs-views-index",
    ),
    path(
        "views/<view>/",
        views.ViewDetailView.as_view(),
        name="django-admindocs-views-detail",
    ),
    path(
        "models/",
        views.ModelIndexView.as_view(),
        name="django-admindocs-models-index",
    ),
    # re_path() because app label and model name are separated by a literal
    # dot, which path() converters cannot express.
    re_path(
        r"^models/(?P<app_label>[^.]+)\.(?P<model_name>[^/]+)/$",
        views.ModelDetailView.as_view(),
        name="django-admindocs-models-detail",
    ),
    path(
        "templates/<path:template>/",
        views.TemplateDetailView.as_view(),
        name="django-admindocs-templates",
    ),
]
from __future__ import absolute_import
from .Visitor import CythonTransform
from .StringEncoding import EncodedString
from . import Options
from . import PyrexTypes, ExprNodes
class EmbedSignature(CythonTransform):
    """Tree transform implementing the ``embedsignature`` directive:
    prepend a text rendering of each function/method/property signature
    to the corresponding docstring."""

    def __init__(self, context):
        super(EmbedSignature, self).__init__(context)
        self.denv = None # XXX compile-time env used to evaluate defaults; only ever set to None here
        self.class_name = None  # name of the class currently being visited
        self.class_node = None  # AST node of that class

    # Operator precedence tables used when re-rendering default-value
    # expressions, to decide where parentheses are required.
    unop_precedence = 11
    binop_precedence = {
        'or': 1,
        'and': 2,
        'not': 3,
        'in': 4, 'not in': 4, 'is': 4, 'is not': 4, '<': 4, '<=': 4, '>': 4, '>=': 4, '!=': 4, '==': 4,
        '|': 5,
        '^': 6,
        '&': 7,
        '<<': 8, '>>': 8,
        '+': 9, '-': 9,
        '*': 10, '/': 10, '//': 10, '%': 10,
        # unary: '+': 11, '-': 11, '~': 11
        '**': 12}

    def _fmt_expr_node(self, node, precedence=0):
        """Render a default-value expression node back to source text,
        parenthesising subexpressions whose operator binds less tightly
        than the surrounding context."""
        if isinstance(node, ExprNodes.BinopNode) and not node.inplace:
            new_prec = self.binop_precedence.get(node.operator, 0)
            result = '%s %s %s' % (self._fmt_expr_node(node.operand1, new_prec),
                                   node.operator,
                                   self._fmt_expr_node(node.operand2, new_prec))
            if precedence > new_prec:
                result = '(%s)' % result
        elif isinstance(node, ExprNodes.UnopNode):
            result = '%s%s' % (node.operator,
                               self._fmt_expr_node(node.operand, self.unop_precedence))
            if precedence > self.unop_precedence:
                result = '(%s)' % result
        elif isinstance(node, ExprNodes.AttributeNode):
            result = '%s.%s' % (self._fmt_expr_node(node.obj), node.attribute)
        else:
            # Fall back to the node's plain name (e.g. a NameNode).
            result = node.name
        return result

    def _fmt_arg_defv(self, arg):
        """Format an argument's default value for display, or return None
        if the argument has no default."""
        default_val = arg.default
        if not default_val:
            return None
        if isinstance(default_val, ExprNodes.NullNode):
            return 'NULL'
        try:
            denv = self.denv # XXX unused local; self.denv is passed directly below
            ctval = default_val.compile_time_value(self.denv)
            repr_val = repr(ctval)
            # Normalise literal prefixes so unicode/bytes literals always
            # carry an explicit u/b prefix and plain strings carry none.
            if isinstance(default_val, ExprNodes.UnicodeNode):
                if repr_val[:1] != 'u':
                    return u'u%s' % repr_val
            elif isinstance(default_val, ExprNodes.BytesNode):
                if repr_val[:1] != 'b':
                    return u'b%s' % repr_val
            elif isinstance(default_val, ExprNodes.StringNode):
                if repr_val[:1] in 'ub':
                    return repr_val[1:]
            return repr_val
        except Exception:
            # Not a compile-time constant: re-render the expression tree
            # instead; give up with a placeholder if that fails too.
            try:
                return self._fmt_expr_node(default_val)
            except AttributeError as e:
                return '<???>'

    def _fmt_arg(self, arg):
        """Format one argument as ``[type ]name[=default]``; plain object
        arguments and ``self`` show the bare name."""
        if arg.type is PyrexTypes.py_object_type or arg.is_self_arg:
            doc = arg.name
        else:
            doc = arg.type.declaration_code(arg.name, for_display=1)
        if arg.default:
            arg_defv = self._fmt_arg_defv(arg)
            if arg_defv:
                doc = doc + ('=%s' % arg_defv)
        return doc

    def _fmt_arglist(self, args,
                     npargs=0, pargs=None,
                     nkargs=0, kargs=None,
                     hide_self=False):
        """Format the whole argument list, inserting the ``*args``/``*``
        marker before keyword-only arguments and appending ``**kwargs``;
        optionally hides the self argument."""
        arglist = []
        for arg in args:
            if not hide_self or not arg.entry.is_self_arg:
                arg_doc = self._fmt_arg(arg)
                arglist.append(arg_doc)
        if pargs:
            arglist.insert(npargs, '*%s' % pargs.name)
        elif nkargs:
            # Keyword-only args but no *args: mark the boundary with '*'.
            arglist.insert(npargs, '*')
        if kargs:
            arglist.append('**%s' % kargs.name)
        return arglist

    def _fmt_ret_type(self, ret):
        """Return the display form of a return type, or None for a plain
        Python object (not worth showing)."""
        if ret is PyrexTypes.py_object_type:
            return None
        else:
            return ret.declaration_code("", for_display=1)

    def _fmt_signature(self, cls_name, func_name, args,
                       npargs=0, pargs=None,
                       nkargs=0, kargs=None,
                       return_type=None, hide_self=False):
        """Build the complete signature string, e.g.
        ``Class.func(a, b=1) -> int``."""
        arglist = self._fmt_arglist(args,
                                    npargs, pargs,
                                    nkargs, kargs,
                                    hide_self=hide_self)
        arglist_doc = ', '.join(arglist)
        func_doc = '%s(%s)' % (func_name, arglist_doc)
        if cls_name:
            func_doc = '%s.%s' % (cls_name, func_doc)
        if return_type:
            ret_doc = self._fmt_ret_type(return_type)
            if ret_doc:
                func_doc = '%s -> %s' % (func_doc, ret_doc)
        return func_doc

    def _embed_signature(self, signature, node_doc):
        """Prepend the signature line to an existing docstring (if any)."""
        if node_doc:
            return "%s\n%s" % (signature, node_doc)
        else:
            return signature

    def __call__(self, node):
        # Docstrings disabled globally: nothing to embed signatures into.
        if not Options.docstrings:
            return node
        else:
            return super(EmbedSignature, self).__call__(node)

    def visit_ClassDefNode(self, node):
        """Track the name/node of the class being visited so that method
        signatures can be qualified with the class name."""
        oldname = self.class_name
        oldclass = self.class_node
        self.class_node = node
        try:
            # PyClassDefNode
            self.class_name = node.name
        except AttributeError:
            # CClassDefNode
            self.class_name = node.class_name
        self.visitchildren(node)
        self.class_name = oldname
        self.class_node = oldclass
        return node

    def visit_LambdaNode(self, node):
        # Lambda expressions do not have signatures or inner functions.
        return node

    def visit_DefNode(self, node):
        """Embed the signature of a Python-level def; ``__init__``'s
        signature is attached to the class docstring instead."""
        if not self.current_directives['embedsignature']:
            return node
        is_constructor = False
        hide_self = False
        if node.entry.is_special:
            # Of the special methods, only __init__ gets a signature; it is
            # displayed as the class signature, with self hidden.
            is_constructor = self.class_node and node.name == '__init__'
            if not is_constructor:
                return node
            class_name, func_name = None, self.class_name
            hide_self = True
        else:
            class_name, func_name = self.class_name, node.name
        nkargs = getattr(node, 'num_kwonly_args', 0)
        npargs = len(node.args) - nkargs
        signature = self._fmt_signature(
            class_name, func_name, node.args,
            npargs, node.star_arg,
            nkargs, node.starstar_arg,
            return_type=None, hide_self=hide_self)
        if signature:
            if is_constructor:
                # The constructor signature lives on the class scope's doc.
                doc_holder = self.class_node.entry.type.scope
            else:
                doc_holder = node.entry
            if doc_holder.doc is not None:
                old_doc = doc_holder.doc
            elif not is_constructor and getattr(node, 'py_func', None) is not None:
                old_doc = node.py_func.entry.doc
            else:
                old_doc = None
            new_doc = self._embed_signature(signature, old_doc)
            doc_holder.doc = EncodedString(new_doc)
            if not is_constructor and getattr(node, 'py_func', None) is not None:
                # Keep the py_func entry's doc in sync as well.
                node.py_func.entry.doc = EncodedString(new_doc)
        return node

    def visit_CFuncDefNode(self, node):
        """Embed the signature of a cpdef function; cdef-only functions are
        skipped (not node.overridable)."""
        if not self.current_directives['embedsignature']:
            return node
        if not node.overridable: # not cpdef FOO(...):
            return node
        signature = self._fmt_signature(
            self.class_name, node.declarator.base.name,
            node.declarator.args,
            return_type=node.return_type)
        if signature:
            if node.entry.doc is not None:
                old_doc = node.entry.doc
            elif getattr(node, 'py_func', None) is not None:
                old_doc = node.py_func.entry.doc
            else:
                old_doc = None
            new_doc = self._embed_signature(signature, old_doc)
            node.entry.doc = EncodedString(new_doc)
            if hasattr(node, 'py_func') and node.py_func is not None:
                node.py_func.entry.doc = EncodedString(new_doc)
        return node

    def visit_PropertyNode(self, node):
        """Embed ``name: type`` into the docstring of properties synthesised
        for cdef public attributes."""
        if not self.current_directives['embedsignature']:
            return node
        entry = node.entry
        if entry.visibility == 'public':
            # property synthesised from a cdef public attribute
            type_name = entry.type.declaration_code("", for_display=1)
            if not entry.type.is_pyobject:
                type_name = "'%s'" % type_name
            elif entry.type.is_extension_type:
                type_name = entry.type.module_name + '.' + type_name
            signature = '%s: %s' % (entry.name, type_name)
            new_doc = self._embed_signature(signature, entry.doc)
            entry.doc = EncodedString(new_doc)
        return node
# Next.js Documentation Conventions
Complete reference for frontmatter schema, code block formatting, and MDX component usage.
## Frontmatter Schema
All MDX files must start with YAML frontmatter enclosed in `---` delimiters.
### Required Fields
| Field | Description | Example |
| ------------- | ------------------------------------------- | ------------------------------------------------ |
| `title` | Page title for SEO and headings (2-3 words) | `title: Image Component` |
| `description` | Brief description (1-2 sentences) | `description: Optimize images using next/image.` |
### Optional Fields
| Field | Description | Example |
| ----------- | -------------------------------------------------- | -------------------------------------------- |
| `nav_title` | Shorter title for navigation sidebar | `nav_title: Image` |
| `source` | Pull content from another page (avoid duplication) | `source: app/api-reference/components/image` |
| `related` | Next steps section with related links | See below |
| `version` | Development stage indicator | `version: experimental` |
### Related Links Format
```yaml
---
title: My Feature
description: Description here.
related:
title: Next Steps
description: Learn more about related features.
links:
- app/api-reference/components/image
- app/guides/optimizing/images
---
```
### Version Field Values
- `experimental` - Experimental feature, may change
- `legacy` - Legacy feature, consider alternatives
- `unstable` - Unstable API, not recommended for production
- `RC` - Release candidate
## Code Block Conventions
### Basic Syntax
````
```language filename="path/to/file.ext"
code here
```
````
### Required Attributes
| Attribute | When to Use | Example |
| ----------- | --------------------------------- | ------------------------- |
| `filename` | Always for code examples | `filename="app/page.tsx"` |
| `switcher` | When providing TS and JS variants | `switcher` |
| `highlight` | To highlight specific lines | `highlight={1,3-5}` |
### TypeScript/JavaScript Switcher Pattern
Always provide TypeScript first, then JavaScript:
````mdx
```tsx filename="app/page.tsx" switcher
import type { Metadata } from 'next'
export const metadata: Metadata = {
title: 'My Page',
}
```
```jsx filename="app/page.js" switcher
export const metadata = {
title: 'My Page',
}
```
````
### Terminal Commands
Use `bash` language without filename:
````mdx
```bash
npm install next
```
````
### Highlighting Lines
```
highlight={1} # Single line
highlight={1,3} # Multiple lines
highlight={1-5} # Range
highlight={1,3-5,8} # Combined
```
## MDX Components
### AppOnly / PagesOnly
Use for router-specific content in shared documentation:
```mdx
<AppOnly>
This content only appears in App Router documentation.
</AppOnly>
<PagesOnly>
This content only appears in Pages Router documentation.
</PagesOnly>
```
**Important:** Include blank lines inside the components for proper markdown parsing.
### Image Component
For themed images with light/dark variants:
```mdx
<Image
alt="Description of the image"
srcLight="/docs/light/image-name.png"
srcDark="/docs/dark/image-name.png"
width={1600}
height={800}
/>
```
### Notes and Callouts
**Single line:**
```mdx
> **Good to know**: Important information here.
```
**Multi-line:**
```mdx
> **Good to know**:
>
> - First point
> - Second point
> - Third point
```
## Props Tables
Use HTML table wrapper for horizontal scroll on mobile:
```mdx
<div style={{ overflowX: 'auto', width: '100%' }}>
| Prop | Example | Type | Status |
| ----------------- | ------------------- | ------- | -------- |
| [`src`](#src) | `src="/image.png"` | String | Required |
| [`alt`](#alt) | `alt="Description"` | String | Required |
| [`width`](#width) | `width={500}` | Integer | - |
</div>
```
### Status Values
- `Required` - Must be provided
- `-` - Optional
- `Deprecated` - Will be removed, use alternative
## Shared Content Pattern
For Pages Router docs that share content with App Router:
**App Router (source):** `docs/01-app/03-api-reference/02-components/image.mdx`
- Contains the full documentation
- Uses `<AppOnly>` and `<PagesOnly>` for router-specific sections
**Pages Router (consumer):** `docs/02-pages/03-api-reference/01-components/image.mdx`
```yaml
---
title: Image Component
description: Optimize images using next/image.
source: app/api-reference/components/image
---
```
The `source` field pulls content from the App Router doc.
## Writing Style
### Voice
- **Guides:** Instructional, use "you" to address users
- **API Reference:** Technical, use imperative verbs ("create", "pass", "return")
### Clarity
- Use plain words over complex alternatives
- Be specific: "the `src` prop" not "this prop"
- Avoid jargon unless explaining it
### Structure
Typical page structure:
1. Brief introduction (what and why)
2. Minimal working example
3. Detailed reference/options
4. Examples for different use cases
5. Related links (via frontmatter)
## File Naming
- Use kebab-case: `generate-metadata.mdx`
- Add numeric prefix for ordering: `01-installation.mdx`
- Index pages: `index.mdx`
## Validation Commands
```bash
pnpm lint # Full lint check
pnpm prettier-fix # Fix formatting
pnpm types # TypeScript check
``` | unknown | github | https://github.com/vercel/next.js | .claude/skills/update-docs/references/DOC-CONVENTIONS.md |
from __future__ import division, absolute_import, print_function
import warnings
import sys
import numpy as np
from numpy.testing import *
import unittest
class _GenericTest(object):
    """Shared checks for the numpy.testing comparison helpers.

    Subclasses assign the assertion function under test to
    ``self._assert_func`` (usually in ``setUp``); the test methods here
    then exercise it on a range of inputs.
    """

    def _test_equal(self, a, b):
        # Must not raise for equal inputs.
        self._assert_func(a, b)

    def _test_not_equal(self, a, b):
        # Must raise AssertionError for unequal inputs.  (A dead
        # ``passed = True`` store was removed from the try body.)
        try:
            self._assert_func(a, b)
        except AssertionError:
            pass
        else:
            raise AssertionError("a and b are found equal but are not")

    def test_array_rank1_eq(self):
        """Test two equal array of rank 1 are found equal."""
        a = np.array([1, 2])
        b = np.array([1, 2])
        self._test_equal(a, b)

    def test_array_rank1_noteq(self):
        """Test two different array of rank 1 are found not equal."""
        a = np.array([1, 2])
        b = np.array([2, 2])
        self._test_not_equal(a, b)

    def test_array_rank2_eq(self):
        """Test two equal array of rank 2 are found equal."""
        a = np.array([[1, 2], [3, 4]])
        b = np.array([[1, 2], [3, 4]])
        self._test_equal(a, b)

    def test_array_diffshape(self):
        """Test two arrays with different shapes are found not equal."""
        a = np.array([1, 2])
        b = np.array([[1, 2], [1, 2]])
        self._test_not_equal(a, b)

    def test_objarray(self):
        """Test object arrays."""
        # Builtin ``object``: the ``np.object`` alias was deprecated and
        # later removed from NumPy; behaviour is identical.
        a = np.array([1, 1], dtype=object)
        self._test_equal(a, 1)

    def test_array_likes(self):
        # Array-likes (list vs. tuple) must compare element-wise equal.
        self._test_equal([1, 2, 3], (1, 2, 3))
class TestArrayEqual(_GenericTest, unittest.TestCase):
    """Run the generic checks against assert_array_equal, plus coverage
    for every dtype kind, nan handling, strings and record arrays."""

    def setUp(self):
        # _GenericTest helpers dispatch through this attribute.
        self._assert_func = assert_array_equal

    def test_generic_rank1(self):
        """Test rank 1 array for all dtypes."""
        def foo(t):
            a = np.empty(2, t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_generic_rank3(self):
        """Test rank 3 array for all dtypes."""
        def foo(t):
            a = np.empty((4, 2, 3), t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_nan_array(self):
        """Test arrays with nan values in them."""
        a = np.array([1, 2, np.nan])
        b = np.array([1, 2, np.nan])
        self._test_equal(a, b)

        c = np.array([1, 2, 3])
        self._test_not_equal(c, b)

    def test_string_arrays(self):
        """Test two arrays with different shapes are found not equal."""
        a = np.array(['floupi', 'floupa'])
        b = np.array(['floupi', 'floupa'])
        self._test_equal(a, b)

        c = np.array(['floupipi', 'floupa'])
        self._test_not_equal(c, b)

    def test_recarrays(self):
        """Test record arrays."""
        # Builtin ``float``: the ``np.float`` alias was deprecated and
        # later removed from NumPy; behaviour is identical.
        a = np.empty(2, [('floupi', float), ('floupa', float)])
        a['floupi'] = [1, 2]
        a['floupa'] = [1, 2]
        b = a.copy()
        self._test_equal(a, b)

        # Same field values, but a different field name must not match.
        c = np.empty(2, [('floupipi', float), ('floupa', float)])
        c['floupipi'] = a['floupi'].copy()
        c['floupa'] = a['floupa'].copy()
        self._test_not_equal(c, b)
class TestBuildErrorMessage(unittest.TestCase):
    """Tests for build_err_msg, the helper that formats comparison-failure
    messages.

    NOTE(review): the expected strings below encode numpy's exact array
    repr, whitespace included -- they are sensitive to repr changes
    across numpy versions.
    """

    def test_build_err_msg_defaults(self):
        x = np.array([1.00001, 2.00002, 3.00003])
        y = np.array([1.00002, 2.00003, 3.00004])
        err_msg = 'There is a mismatch'

        a = build_err_msg([x, y], err_msg)
        b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ '
             '1.00001, 2.00002, 3.00003])\n DESIRED: array([ 1.00002, '
             '2.00003, 3.00004])')
        self.assertEqual(a, b)

    def test_build_err_msg_no_verbose(self):
        x = np.array([1.00001, 2.00002, 3.00003])
        y = np.array([1.00002, 2.00003, 3.00004])
        err_msg = 'There is a mismatch'

        # verbose=False drops the ACTUAL/DESIRED array dump entirely.
        a = build_err_msg([x, y], err_msg, verbose=False)
        b = '\nItems are not equal: There is a mismatch'
        self.assertEqual(a, b)

    def test_build_err_msg_custom_names(self):
        x = np.array([1.00001, 2.00002, 3.00003])
        y = np.array([1.00002, 2.00003, 3.00004])
        err_msg = 'There is a mismatch'

        # names= replaces the default ACTUAL/DESIRED labels.
        a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))
        b = ('\nItems are not equal: There is a mismatch\n FOO: array([ '
             '1.00001, 2.00002, 3.00003])\n BAR: array([ 1.00002, 2.00003, '
             '3.00004])')
        self.assertEqual(a, b)

    def test_build_err_msg_custom_precision(self):
        x = np.array([1.000000001, 2.00002, 3.00003])
        y = np.array([1.000000002, 2.00003, 3.00004])
        err_msg = 'There is a mismatch'

        # precision= widens the repr so the 9th decimal is visible.
        a = build_err_msg([x, y], err_msg, precision=10)
        b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ '
             '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([ '
             '1.000000002, 2.00003 , 3.00004 ])')
        self.assertEqual(a, b)
class TestEqual(TestArrayEqual):
    """Run the array-equality checks against assert_equal, plus
    scalar/item-specific cases (nan, inf, strings, complex, signed zero)."""

    def setUp(self):
        self._assert_func = assert_equal

    def test_nan_items(self):
        # nan equals nan, but a scalar nan differs from [nan] and from 1.
        self._assert_func(np.nan, np.nan)
        self._assert_func([np.nan], [np.nan])
        self._test_not_equal(np.nan, [np.nan])
        self._test_not_equal(np.nan, 1)

    def test_inf_items(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func([np.inf], [np.inf])
        self._test_not_equal(np.inf, [np.inf])

    def test_non_numeric(self):
        self._assert_func('ab', 'ab')
        self._test_not_equal('ab', 'abb')

    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_negative_zero(self):
        # 0.0 == -0.0 under ==, but assert_equal must distinguish the sign.
        # Literals replace the np.PZERO/np.NZERO aliases, which were removed
        # from NumPy; the values are identical.
        self._test_not_equal(0.0, -0.0)

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
    """Generic checks plus decimal-precision, nan/inf and masked-array
    cases for assert_array_almost_equal."""

    def setUp(self):
        # _GenericTest helpers dispatch through this attribute.
        self._assert_func = assert_array_almost_equal

    def test_simple(self):
        x = np.array([1234.2222])
        y = np.array([1234.2223])

        # Equal to 3 and 4 decimal places, but not to 5.
        self._assert_func(x, y, decimal=3)
        self._assert_func(x, y, decimal=4)
        self.assertRaises(AssertionError,
                lambda: self._assert_func(x, y, decimal=5))

    def test_nan(self):
        # nan matches only nan; mixed nan/finite/inf comparisons must fail.
        anan = np.array([np.nan])
        aone = np.array([1])
        ainf = np.array([np.inf])
        self._assert_func(anan, anan)
        self.assertRaises(AssertionError,
                lambda : self._assert_func(anan, aone))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(anan, ainf))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(ainf, anan))

    def test_inf(self):
        # A single inf element makes otherwise-equal arrays unequal.
        a = np.array([[1., 2.], [3., 4.]])
        b = a.copy()
        a[0, 0] = np.inf
        self.assertRaises(AssertionError,
                lambda : self._assert_func(a, b))

    def test_subclass(self):
        # Masked entries are ignored by the comparison, in either argument
        # order (and masked vs. masked).
        a = np.array([[1., 2.], [3., 4.]])
        b = np.ma.masked_array([[1., 2.], [0., 4.]],
                               [[False, False], [True, False]])
        assert_array_almost_equal(a, b)
        assert_array_almost_equal(b, a)
        assert_array_almost_equal(b, b)
class TestAlmostEqual(_GenericTest, unittest.TestCase):
    """Generic checks plus scalar, complex and error-message-format cases
    for assert_almost_equal.

    NOTE(review): test_error_message encodes numpy's exact array repr and
    the failure-message layout -- sensitive to numpy version changes.
    """

    def setUp(self):
        self._assert_func = assert_almost_equal

    def test_nan_item(self):
        # Scalar nan matches only nan.
        self._assert_func(np.nan, np.nan)
        self.assertRaises(AssertionError,
                lambda : self._assert_func(np.nan, 1))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(np.nan, np.inf))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(np.inf, np.nan))

    def test_inf_item(self):
        # Infinities of matching sign are equal; inf never "almost equals"
        # a finite value.
        self._assert_func(np.inf, np.inf)
        self._assert_func(-np.inf, -np.inf)
        self.assertRaises(AssertionError,
                lambda : self._assert_func(np.inf, 1))

    def test_simple_item(self):
        self._test_not_equal(1, 2)

    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        z = np.array([complex(1, 2), complex(np.nan, 1)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
        self._test_not_equal(x, z)

    def test_error_message(self):
        """Check the message is formatted correctly for the decimal value"""
        x = np.array([1.00000000001, 2.00000000002, 3.00003])
        y = np.array([1.00000000002, 2.00000000003, 3.00004])

        # test with a different amount of decimal digits
        # note that we only check for the formatting of the arrays themselves
        b = ('x: array([ 1.00000000001, 2.00000000002, 3.00003 '
             ' ])\n y: array([ 1.00000000002, 2.00000000003, 3.00004 ])')
        try:
            self._assert_func(x, y, decimal=12)
        except AssertionError as e:
            # remove anything that's not the array string
            self.assertEqual(str(e).split('%)\n ')[1], b)

        # with the default value of decimal digits, only the 3rd element differs
        # note that we only check for the formatting of the arrays themselves
        b = ('x: array([ 1. , 2. , 3.00003])\n y: array([ 1. , '
             '2. , 3.00004])')
        try:
            self._assert_func(x, y)
        except AssertionError as e:
            # remove anything that's not the array string
            self.assertEqual(str(e).split('%)\n ')[1], b)
class TestApproxEqual(unittest.TestCase):
    """Tests for assert_approx_equal (significant-digit comparison)."""

    def setUp(self):
        self._assert_func = assert_approx_equal

    def test_simple_arrays(self):
        x = np.array([1234.22])
        y = np.array([1234.23])

        # Equal to 5 and 6 significant digits, but not to 7.
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(x, y, significant=7))

    def test_simple_items(self):
        x = 1234.22
        y = 1234.23

        self._assert_func(x, y, significant=4)
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(x, y, significant=7))

    def test_nan_array(self):
        # nan only matches nan; any mixed nan/non-nan comparison must fail.
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(anan, aone))
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(anan, ainf))
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(ainf, anan))

    def test_nan_items(self):
        # Same checks as test_nan_array, but with plain Python scalars.
        # (The original body was a verbatim copy of test_nan_array and
        # never actually exercised non-array items.)
        anan = np.nan
        aone = 1
        ainf = np.inf
        self._assert_func(anan, anan)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(anan, aone))
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(anan, ainf))
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(ainf, anan))
class TestRaises(unittest.TestCase):
    """Tests for the numpy.testing ``raises`` decorator."""

    def setUp(self):
        class MyException(Exception):
            pass

        self.e = MyException

    def raises_exception(self, e):
        raise e

    def does_not_raise_exception(self):
        pass

    def test_correct_catch(self):
        # The expected exception is raised and must be swallowed by the
        # decorator.  (Unused ``f =`` binding removed.)
        raises(self.e)(self.raises_exception)(self.e)

    def test_wrong_exception(self):
        # An unexpected exception type must propagate unchanged.
        try:
            raises(self.e)(self.raises_exception)(RuntimeError)
        except RuntimeError:
            return
        else:
            raise AssertionError("should have caught RuntimeError")

    def test_catch_no_raise(self):
        # If nothing is raised, the decorator must fail with AssertionError.
        try:
            raises(self.e)(self.does_not_raise_exception)()
        except AssertionError:
            return
        else:
            raise AssertionError("should have raised an AssertionError")
class TestWarns(unittest.TestCase):
    """Tests for assert_warns / assert_no_warnings."""

    def test_warn(self):
        def f():
            warnings.warn("yo")
            return 3

        before_filters = sys.modules['warnings'].filters[:]
        # assert_warns returns the wrapped function's return value.
        assert_equal(assert_warns(UserWarning, f), 3)
        after_filters = sys.modules['warnings'].filters

        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)

        # Check that the warnings state is unchanged
        # (typo fixed in the failure message: "preserver" -> "preserve").
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserve warnings state")

    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            # Restore the global filter list no matter what happened above.
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
class TestAssertAllclose(unittest.TestCase):
    """Behavioural checks for assert_allclose."""

    def test_simple(self):
        small, tiny = 1e-3, 1e-9
        # Within the absolute tolerance, but far apart in relative terms.
        assert_allclose(small, tiny, atol=1)
        with self.assertRaises(AssertionError):
            assert_allclose(small, tiny)

        left = np.array([small, tiny, small, tiny])
        right = np.array([small, tiny, small, small])
        assert_allclose(left, right, atol=1)
        with self.assertRaises(AssertionError):
            assert_allclose(left, right)

        # Nudge the mismatching element to within the default rtol...
        right[-1] = tiny * (1 + 1e-8)
        assert_allclose(left, right)
        # ...but a tighter rtol still rejects it.
        with self.assertRaises(AssertionError):
            assert_allclose(left, right, rtol=1e-9)

        # rtol is measured against the desired value (second argument),
        # so the comparison is asymmetric.
        assert_allclose(6, 10, rtol=0.5)
        with self.assertRaises(AssertionError):
            assert_allclose(10, 6, rtol=0.5)

    def test_min_int(self):
        # Comparing the most negative int to itself should not raise.
        extreme = np.array([np.iinfo(np.int_).min], dtype=np.int_)
        assert_allclose(extreme, extreme)
class TestArrayAlmostEqualNulp(unittest.TestCase):
    """Tests for assert_array_almost_equal_nulp.

    NOTE(review): relies on the legacy ``dec`` decorator collection and the
    removed ``np.complex`` alias, both from much older numpy releases --
    verify availability before running against modern numpy.
    """

    @dec.knownfailureif(True, "Github issue #347")
    def test_simple(self):
        # Deviations of a few eps should stay within the requested nulp.
        np.random.seed(12345)
        for i in range(100):
            dev = np.random.randn(10)
            x = np.ones(10)
            y = x + dev * np.finfo(np.float64).eps
            assert_array_almost_equal_nulp(x, y, nulp=2 * np.max(dev))

    def test_simple2(self):
        # Doubling every value is far more than 1000 nulp apart.
        x = np.random.randn(10)
        y = 2 * x

        def failure():
            return assert_array_almost_equal_nulp(x, y,
                                                  nulp=1000)
        self.assertRaises(AssertionError, failure)

    def test_big_float32(self):
        # Adding 1 to values around 1e10 is within 1000 nulp in float32...
        x = (1e10 * np.random.randn(10)).astype(np.float32)
        y = x + 1
        assert_array_almost_equal_nulp(x, y, nulp=1000)

    def test_big_float64(self):
        # ...but not in float64, where the same delta spans far more nulp.
        x = 1e10 * np.random.randn(10)
        y = x + 1

        def failure():
            assert_array_almost_equal_nulp(x, y, nulp=1000)
        self.assertRaises(AssertionError, failure)

    def test_complex(self):
        x = np.random.randn(10) + 1j * np.random.randn(10)
        y = x + 1

        def failure():
            assert_array_almost_equal_nulp(x, y, nulp=1000)
        self.assertRaises(AssertionError, failure)

    def test_complex2(self):
        # NOTE(review): ``np.complex`` was removed from numpy; builtin
        # ``complex`` is the drop-in replacement.
        x = np.random.randn(10)
        y = np.array(x, np.complex) + 1e-16 * np.random.randn(10)
        assert_array_almost_equal_nulp(x, y, nulp=1000)
class TestULP(unittest.TestCase):
    """Tests for assert_array_max_ulp (units-in-the-last-place distance)."""

    def test_equal(self):
        # An array is within 0 ULP of itself.
        x = np.random.randn(10)
        assert_array_max_ulp(x, x, maxulp=0)

    def test_single(self):
        # Generate 1 + small deviation, check that adding eps gives a few UNL
        x = np.ones(10).astype(np.float32)
        x += 0.01 * np.random.randn(10).astype(np.float32)
        eps = np.finfo(np.float32).eps
        assert_array_max_ulp(x, x + eps, maxulp=20)

    def test_double(self):
        # Generate 1 + small deviation, check that adding eps gives a few UNL
        x = np.ones(10).astype(np.float64)
        x += 0.01 * np.random.randn(10).astype(np.float64)
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, x + eps, maxulp=200)

    def test_inf(self):
        # The largest finite value is within a bounded ULP distance of inf.
        for dt in [np.float32, np.float64]:
            inf = np.array([np.inf]).astype(dt)
            big = np.array([np.finfo(dt).max])
            assert_array_max_ulp(inf, big, maxulp=200)

    def test_nan(self):
        # Test that nan is 'far' from small, tiny, inf, max and min
        for dt in [np.float32, np.float64]:
            if dt == np.float32:
                maxulp = 1e6
            else:
                maxulp = 1e12
            inf = np.array([np.inf]).astype(dt)
            nan = np.array([np.nan]).astype(dt)
            big = np.array([np.finfo(dt).max])
            tiny = np.array([np.finfo(dt).tiny])
            # Signed-zero literals replace the np.PZERO/np.NZERO aliases,
            # which were removed from NumPy; the values are identical.
            zero = np.array([0.0]).astype(dt)
            nzero = np.array([-0.0]).astype(dt)
            for other in (inf, big, tiny, zero, nzero):
                self.assertRaises(AssertionError,
                                  lambda: assert_array_max_ulp(nan, other,
                                                               maxulp=maxulp))
# Allow running this test module directly; `run_module_suite` is a
# nose-style runner assumed to be imported earlier in the file (not
# visible in this chunk) — TODO confirm.
if __name__ == '__main__':
    run_module_suite()
from .base import Browser, ExecutorBrowser, require_arg
from ..webdriver_server import EdgeDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor,
SeleniumRefTestExecutor)
from ..executors.executoredge import EdgeDriverWdspecExecutor
# Registration map consumed by the wptrunner product loader: entry points
# for the "edge" product are named here by string and resolved from this
# module at runtime.
__wptrunner__ = {"product": "edge",
                 "check_args": "check_args",
                 "browser": "EdgeBrowser",
                 "executor": {"testharness": "SeleniumTestharnessExecutor",
                              "reftest": "SeleniumRefTestExecutor",
                              "wdspec": "EdgeDriverWdspecExecutor"},
                 "browser_kwargs": "browser_kwargs",
                 "executor_kwargs": "executor_kwargs",
                 "env_extras": "env_extras",
                 "env_options": "env_options"}
def check_args(**kwargs):
    """Validate that the required webdriver_binary argument was supplied."""
    require_arg(kwargs, "webdriver_binary")
def browser_kwargs(test_type, run_info_data, **kwargs):
    """Return the keyword arguments used to construct EdgeBrowser."""
    webdriver_binary = kwargs["webdriver_binary"]
    webdriver_args = kwargs.get("webdriver_args")
    return {
        "webdriver_binary": webdriver_binary,
        "webdriver_args": webdriver_args,
    }
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                    **kwargs):
    """Build the keyword arguments for the selenium-based executors.

    Extends the base executor kwargs with Edge-specific capabilities and
    forces the browser to close after each test.
    """
    # Imported lazily so selenium is only required when Edge is used.
    from selenium.webdriver import DesiredCapabilities

    executor_kwargs = base_executor_kwargs(test_type, server_config,
                                           cache_manager, **kwargs)
    executor_kwargs["close_after_done"] = True
    # Copy the capabilities dict so the shared default is not mutated.
    executor_kwargs["capabilities"] = dict(DesiredCapabilities.EDGE.items())
    return executor_kwargs
def env_extras(**kwargs):
    """No extra test-environment pieces are needed for Edge."""
    extras = []
    return extras
def env_options():
    """Per-product environment options; Edge has no debugger support."""
    options = {"supports_debugger": False}
    return options
class EdgeBrowser(Browser):
    """Edge is controlled through an EdgeDriver (WebDriver) server; this
    class manages the lifetime of that server process."""

    used_ports = set()

    def __init__(self, logger, webdriver_binary, webdriver_args=None):
        Browser.__init__(self, logger)
        self.server = EdgeDriverServer(self.logger,
                                       binary=webdriver_binary,
                                       args=webdriver_args)
        self.webdriver_host = "localhost"
        self.webdriver_port = self.server.port

    def start(self, **kwargs):
        # Log the server URL instead of printing it: the bare Python 2
        # `print` statement here was a debug leftover and breaks Python 3.
        self.logger.debug("Starting EdgeDriver server at %s" % self.server.url)
        self.server.start()

    def stop(self, force=False):
        self.server.stop(force=force)

    def pid(self):
        return self.server.pid

    def is_alive(self):
        # TODO(ato): This only indicates the server is alive,
        # and doesn't say anything about whether a browser session
        # is active.
        return self.server.is_alive()

    def cleanup(self):
        self.stop()

    def executor_browser(self):
        return ExecutorBrowser, {"webdriver_url": self.server.url}
# pylint: disable=invalid-name
"""
Utility library for working with the edx-milestones app
"""
from django.conf import settings
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.django import modulestore
# Known milestone namespaces; the values are the strings embedded in the
# generated namespace identifiers (see generate_milestone_namespace).
NAMESPACE_CHOICES = {
    'ENTRANCE_EXAM': 'entrance_exams'
}
def get_namespace_choices():
    """
    Return the namespace-choices enum (the NAMESPACE_CHOICES dict) to the
    caller.  Note: the shared module-level dict itself is returned, not a
    copy.
    """
    return NAMESPACE_CHOICES
def is_entrance_exams_enabled():
    """
    Checks to see if the Entrance Exams feature is enabled.

    Use this operation instead of checking the feature flag all over the
    place.  Returns the raw feature-flag value (defaults to False).
    """
    return settings.FEATURES.get('ENTRANCE_EXAMS', False)
def is_prerequisite_courses_enabled():
    """
    Return whether prerequisite courses are enabled system-wide.

    Both ENABLE_PREREQUISITE_COURSES and MILESTONES_APP must be on.
    """
    features = settings.FEATURES
    return features.get('ENABLE_PREREQUISITE_COURSES', False) \
        and features.get('MILESTONES_APP', False)
def add_prerequisite_course(course_key, prerequisite_course_key):
    """
    Create a milestone linking the two courses: the milestone is added as a
    'requires' relationship on the course referred to by `course_key` and as
    a 'fulfills' relationship on the course referred to by
    `prerequisite_course_key`.

    No-op (returns None) when prerequisite courses are disabled.
    """
    if not is_prerequisite_courses_enabled():
        return None
    # Imported lazily so the milestones app is only required when enabled.
    from milestones import api as milestones_api
    # Human-readable milestone name; the namespace ties the milestone to the
    # prerequisite course.
    milestone_name = _('Course {course_id} requires {prerequisite_course_id}').format(
        course_id=unicode(course_key),
        prerequisite_course_id=unicode(prerequisite_course_key)
    )
    milestone = milestones_api.add_milestone({
        'name': milestone_name,
        'namespace': unicode(prerequisite_course_key),
        'description': _('System defined milestone'),
    })
    # add requirement course milestone
    milestones_api.add_course_milestone(course_key, 'requires', milestone)
    # add fulfillment course milestone
    milestones_api.add_course_milestone(prerequisite_course_key, 'fulfills', milestone)
def remove_prerequisite_course(course_key, milestone):
    """
    Remove the given prerequisite-course milestone from the course referred
    to by `course_key`.  No-op (returns None) when prerequisite courses are
    disabled.
    """
    if is_prerequisite_courses_enabled():
        from milestones import api as milestones_api
        milestones_api.remove_course_milestone(course_key, milestone)
    return None
def set_prerequisite_courses(course_key, prerequisite_course_keys):
    """
    It would remove any existing requirement milestones for the given
    `course_key` and create new milestones for each pre-requisite course in
    `prerequisite_course_keys`.

    To only remove course milestones pass `course_key` and an empty list or
    None as `prerequisite_course_keys`.

    No-op (returns None) when prerequisite courses are disabled.
    """
    if not is_prerequisite_courses_enabled():
        return None
    from milestones import api as milestones_api
    #remove any existing requirement milestones with this pre-requisite course as requirement
    course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship="requires")
    if course_milestones:
        for milestone in course_milestones:
            remove_prerequisite_course(course_key, milestone)

    # add milestones if pre-requisite course is selected
    if prerequisite_course_keys:
        for prerequisite_course_key_string in prerequisite_course_keys:
            prerequisite_course_key = CourseKey.from_string(prerequisite_course_key_string)
            add_prerequisite_course(course_key, prerequisite_course_key)
def get_pre_requisite_courses_not_completed(user, enrolled_courses):  # pylint: disable=invalid-name
    """
    Makes a dict mapping courses to their unfulfilled milestones using the
    fulfillment API of the milestones app.

    Arguments:
        user (User): the user for whom we are checking prerequisites.
        enrolled_courses (CourseKey): a list of keys for the courses to be
            checked. The given user must be enrolled in all of these courses.

    Returns:
        dict[CourseKey: dict[
            'courses': list[dict['key': CourseKey, 'display': str]]
        ]]
    If a course has no incomplete prerequisites, it will be excluded from the
    dictionary.  Returns {} when prerequisite courses are disabled.
    """
    if not is_prerequisite_courses_enabled():
        return {}

    from milestones import api as milestones_api
    pre_requisite_courses = {}

    for course_key in enrolled_courses:
        required_courses = []
        fulfillment_paths = milestones_api.get_course_milestones_fulfillment_paths(course_key, {'id': user.id})
        # Each fulfillment path may list courses whose completion would
        # fulfill the milestone; only those entries are of interest here.
        for __, milestone_value in fulfillment_paths.items():
            for key, value in milestone_value.items():
                if key == 'courses' and value:
                    for required_course in value:
                        required_course_key = CourseKey.from_string(required_course)
                        required_course_overview = CourseOverview.get_from_id(required_course_key)
                        required_courses.append({
                            'key': required_course_key,
                            'display': get_course_display_string(required_course_overview)
                        })

        # If there are required courses, add them to the result dict.
        if required_courses:
            pre_requisite_courses[course_key] = {'courses': required_courses}

    return pre_requisite_courses
def get_prerequisite_courses_display(course_descriptor):
    """
    It would retrieve pre-requisite courses, make display strings
    and return list of dictionary with course key as 'key' field
    and course display name as `display` field.

    Returns [] when prerequisite courses are disabled or the course has
    no pre-requisites.
    """
    pre_requisite_courses = []
    if is_prerequisite_courses_enabled() and course_descriptor.pre_requisite_courses:
        for course_id in course_descriptor.pre_requisite_courses:
            course_key = CourseKey.from_string(course_id)
            # Look up the prerequisite course to build its display string.
            required_course_descriptor = modulestore().get_course(course_key)
            prc = {
                'key': course_key,
                'display': get_course_display_string(required_course_descriptor)
            }
            pre_requisite_courses.append(prc)
    return pre_requisite_courses
def get_course_display_string(descriptor):
    """
    Return the display string for a course or course overview:
    "<org> <number>".

    Arguments:
        descriptor (CourseDescriptor|CourseOverview): a course or course
            overview.
    """
    org = descriptor.display_org_with_default
    number = descriptor.display_number_with_default
    return org + ' ' + number
def fulfill_course_milestone(course_key, user):
    """
    Marks the course specified by the given course_key as complete for the
    given user.  If any other courses require this course as a prerequisite,
    their milestones will be appropriately updated.

    No-op (returns None) when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    from milestones import api as milestones_api
    fulfills = milestones_api.get_course_milestones(course_key=course_key,
                                                    relationship="fulfills")
    for milestone in fulfills:
        milestones_api.add_user_milestone({'id': user.id}, milestone)
def remove_course_milestones(course_key, user, relationship):
    """
    Remove all of `user`'s milestones attached to `course_key` with the given
    relationship ('requires' or 'fulfills').

    No-op (returns None) when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    from milestones import api as milestones_api
    course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship=relationship)
    for milestone in course_milestones:
        milestones_api.remove_user_milestone({'id': user.id}, milestone)
def get_required_content(course, user):
    """
    Queries the milestones subsystem to see if the specified course is gated
    on one or more milestones, and if those milestones can be fulfilled via
    completion of particular course content modules.

    Returns a list of the content ids found on the fulfillment paths
    ([] when the milestones app is disabled or nothing is outstanding).
    """
    required_content = []
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return required_content

    from milestones.exceptions import InvalidMilestoneRelationshipTypeException
    # Get all of the outstanding milestones for this course, for this user.
    try:
        milestone_paths = get_course_milestones_fulfillment_paths(
            unicode(course.id),
            serialize_user(user)
        )
    except InvalidMilestoneRelationshipTypeException:
        return required_content

    # Collect every content module that appears on any fulfillment path.
    for milestone_path in milestone_paths.values():
        required_content.extend(milestone_path.get('content') or [])
    return required_content
def milestones_achieved_by_user(user, namespace):
    """
    Fetch the list of milestones in `namespace` completed by `user`.

    No-op (returns None) when the milestones app is disabled.
    """
    if settings.FEATURES.get('MILESTONES_APP', False):
        from milestones import api as milestones_api
        return milestones_api.get_user_milestones({'id': user.id}, namespace)
    return None
def is_valid_course_key(key):
    """
    Validate a course key.  Returns True if `key` parses as a CourseKey (or
    already is one), else False.
    """
    try:
        return isinstance(CourseKey.from_string(key), CourseKey)
    except InvalidKeyError:
        # `key` may already be a CourseKey instance rather than a string.
        return isinstance(key, CourseKey)
def seed_milestone_relationship_types():
    """
    Helper method to pre-populate milestone relationship types (MRTs) so
    the tests can run.

    No-op (returns None) when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    from milestones.models import MilestoneRelationshipType
    # The two relationship types used throughout this module.
    MilestoneRelationshipType.objects.create(name='requires')
    MilestoneRelationshipType.objects.create(name='fulfills')
def generate_milestone_namespace(namespace, course_key=None):
    """
    Returns a specifically-formatted namespace string for the specified type,
    e.g. "<course_key>.entrance_exams" for the entrance-exam namespace.

    NOTE(review): implicitly returns None for any namespace value not in
    NAMESPACE_CHOICES.
    """
    if namespace in NAMESPACE_CHOICES.values():
        if namespace == 'entrance_exams':
            return '{}.{}'.format(unicode(course_key), NAMESPACE_CHOICES['ENTRANCE_EXAM'])
def serialize_user(user):
    """Return the milestones-friendly dict representation of `user`."""
    return {'id': user.id}
def add_milestone(milestone_data):
    """
    Client API operation adapter/wrapper for milestones_api.add_milestone.

    No-op (returns None) when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    from milestones import api as milestones_api
    return milestones_api.add_milestone(milestone_data)
def get_milestones(namespace):
    """
    Client API operation adapter/wrapper for milestones_api.get_milestones.

    Returns [] when the milestones app is disabled.
    """
    if settings.FEATURES.get('MILESTONES_APP', False):
        from milestones import api as milestones_api
        return milestones_api.get_milestones(namespace)
    return []
def get_milestone_relationship_types():
    """
    Client API operation adapter/wrapper for
    milestones_api.get_milestone_relationship_types.

    Returns {} when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return {}
    from milestones import api as milestones_api
    return milestones_api.get_milestone_relationship_types()
def add_course_milestone(course_id, relationship, milestone):
    """
    Client API operation adapter/wrapper for
    milestones_api.add_course_milestone.

    No-op (returns None) when the milestones app is disabled.
    """
    if settings.FEATURES.get('MILESTONES_APP', False):
        from milestones import api as milestones_api
        return milestones_api.add_course_milestone(course_id, relationship, milestone)
    return None
def get_course_milestones(course_id):
    """
    Client API operation adapter/wrapper for
    milestones_api.get_course_milestones.

    Returns [] when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return []
    from milestones import api as milestones_api
    return milestones_api.get_course_milestones(course_id)
def add_course_content_milestone(course_id, content_id, relationship, milestone):
    """
    Client API operation adapter/wrapper for
    milestones_api.add_course_content_milestone.

    No-op (returns None) when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    from milestones import api as milestones_api
    return milestones_api.add_course_content_milestone(course_id, content_id, relationship, milestone)
def get_course_content_milestones(course_id, content_id, relationship):
    """
    Client API operation adapter/wrapper for
    milestones_api.get_course_content_milestones.

    Returns [] when the milestones app is disabled.
    """
    if settings.FEATURES.get('MILESTONES_APP', False):
        from milestones import api as milestones_api
        return milestones_api.get_course_content_milestones(course_id, content_id, relationship)
    return []
def remove_course_content_user_milestones(course_key, content_key, user, relationship):
    """
    Removes the specified User-Milestone link from the system for the
    specified course content module.

    NOTE(review): returns [] when the milestones app is disabled but None
    otherwise — inconsistent with the other remove_* helpers; callers
    should not rely on the return value.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return []
    from milestones import api as milestones_api
    course_content_milestones = milestones_api.get_course_content_milestones(course_key, content_key, relationship)
    for milestone in course_content_milestones:
        milestones_api.remove_user_milestone({'id': user.id}, milestone)
def remove_content_references(content_id):
    """
    Client API operation adapter/wrapper for
    milestones_api.remove_content_references.

    No-op (returns None) when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    from milestones import api as milestones_api
    return milestones_api.remove_content_references(content_id)
def any_unfulfilled_milestones(course_id, user_id):
    """
    Returns a boolean if user has any unfulfilled milestones.

    Always False when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return False
    # A non-empty fulfillment-paths dict means something is outstanding.
    return bool(
        get_course_milestones_fulfillment_paths(course_id, {"id": user_id})
    )
def get_course_milestones_fulfillment_paths(course_id, user_id):
    """
    Client API operation adapter/wrapper for
    milestones_api.get_course_milestones_fulfillment_paths.

    Note: despite the name, `user_id` receives a serialized-user dict of the
    form {'id': ...} from the callers in this module.

    No-op (returns None) when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    from milestones import api as milestones_api
    return milestones_api.get_course_milestones_fulfillment_paths(
        course_id,
        user_id
    )
def add_user_milestone(user, milestone):
    """
    Client API operation adapter/wrapper for
    milestones_api.add_user_milestone.

    No-op (returns None) when the milestones app is disabled.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    from milestones import api as milestones_api
    return milestones_api.add_user_milestone(user, milestone)
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "NamespaceAliaser.h"
#include "ASTUtils.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Lex/Lexer.h"
#include <optional>
namespace clang::tidy::utils {
using namespace ast_matchers;
namespace {
// Matches a NamespaceAliasDecl whose aliased (target) namespace satisfies
// InnerMatcher; used below to discover pre-existing aliases for a namespace.
AST_MATCHER_P(NamespaceAliasDecl, hasTargetNamespace,
              ast_matchers::internal::Matcher<NamespaceDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getNamespace(), Finder, Builder);
}
} // namespace
// The SourceManager is kept so createAlias() can compute insertion points.
NamespaceAliaser::NamespaceAliaser(const SourceManager &SourceMgr)
    : SourceMgr(SourceMgr) {}
// Creates (at most once per function and namespace) a `namespace X = NS;`
// alias at the start of the surrounding function's body, trying each
// candidate in Abbreviations and skipping names that would conflict.
// Returns the insertion FixItHint, or std::nullopt if no alias is needed
// or none can be created.
std::optional<FixItHint>
NamespaceAliaser::createAlias(ASTContext &Context, const Stmt &Statement,
                              StringRef Namespace,
                              const std::vector<std::string> &Abbreviations) {
  const FunctionDecl *Function = getSurroundingFunction(Context, Statement);
  if (!Function || !Function->hasBody())
    return std::nullopt;

  // Already aliased in this function: nothing to insert.
  if (AddedAliases[Function].contains(Namespace.str()))
    return std::nullopt;

  // FIXME: Doesn't consider the order of declarations.
  // If we accidentally pick an alias defined later in the function,
  // the output won't compile.
  // FIXME: Also doesn't consider file or class-scope aliases.

  // Reuse an alias the function already declares for this namespace.
  const auto *ExistingAlias = selectFirst<NamedDecl>(
      "alias", match(functionDecl(hasBody(compoundStmt(has(declStmt(
                         has(namespaceAliasDecl(hasTargetNamespace(hasName(
                                                    std::string(Namespace))))
                                 .bind("alias"))))))),
                     *Function, Context));

  if (ExistingAlias != nullptr) {
    AddedAliases[Function][Namespace.str()] = ExistingAlias->getName().str();
    return std::nullopt;
  }

  for (const auto &Abbreviation : Abbreviations) {
    // Reject an abbreviation that collides with any name declared inside
    // the function or in one of its enclosing declaration contexts.
    const DeclarationMatcher ConflictMatcher = namedDecl(hasName(Abbreviation));
    const auto HasConflictingChildren =
        !match(findAll(ConflictMatcher), *Function, Context).empty();
    const auto HasConflictingAncestors =
        !match(functionDecl(hasAncestor(decl(has(ConflictMatcher)))), *Function,
               Context)
             .empty();
    if (HasConflictingAncestors || HasConflictingChildren)
      continue;

    const std::string Declaration =
        (llvm::Twine("\nnamespace ") + Abbreviation + " = " + Namespace + ";")
            .str();
    // Insert just after the function body's opening brace.
    const SourceLocation Loc =
        Lexer::getLocForEndOfToken(Function->getBody()->getBeginLoc(), 0,
                                   SourceMgr, Context.getLangOpts());
    AddedAliases[Function][Namespace.str()] = Abbreviation;
    return FixItHint::CreateInsertion(Loc, Declaration);
  }

  return std::nullopt;
}
// Returns the alias recorded for `Namespace` in the function surrounding
// `Statement`, falling back to the full namespace name when none exists.
std::string NamespaceAliaser::getNamespaceName(ASTContext &Context,
                                               const Stmt &Statement,
                                               StringRef Namespace) const {
  const FunctionDecl *Function = getSurroundingFunction(Context, Statement);
  const auto It = AddedAliases.find(Function);
  if (It == AddedAliases.end())
    return Namespace.str();
  const auto &Aliases = It->second;
  if (!Aliases.contains(Namespace))
    return Namespace.str();
  return Aliases.find(Namespace)->getValue();
}
} // namespace clang::tidy::utils | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/utils/NamespaceAliaser.cpp |
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* This class models a sequence of literals and a backward reference copy. */
#ifndef BROTLI_ENC_COMMAND_H_
#define BROTLI_ENC_COMMAND_H_
#include "../common/constants.h"
#include "../common/platform.h"
#include "fast_log.h"
#include "params.h"
#include "prefix.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
BROTLI_INTERNAL extern const BROTLI_MODEL("small")
uint32_t kBrotliInsBase[BROTLI_NUM_INS_COPY_CODES];
BROTLI_INTERNAL extern const BROTLI_MODEL("small")
uint32_t kBrotliInsExtra[BROTLI_NUM_INS_COPY_CODES];
BROTLI_INTERNAL extern const BROTLI_MODEL("small")
uint32_t kBrotliCopyBase[BROTLI_NUM_INS_COPY_CODES];
BROTLI_INTERNAL extern const BROTLI_MODEL("small")
uint32_t kBrotliCopyExtra[BROTLI_NUM_INS_COPY_CODES];
/* Maps an insert length to its insert-length code. Codes 0..5 encode the
   length directly; the middle ranges use a mantissa/exponent style code
   with extra bits; codes 21..23 cover the largest ranges. */
static BROTLI_INLINE uint16_t GetInsertLengthCode(size_t insertlen) {
  if (insertlen < 6) {
    return (uint16_t)insertlen;
  } else if (insertlen < 130) {
    uint32_t nbits = Log2FloorNonZero(insertlen - 2) - 1u;
    return (uint16_t)((nbits << 1) + ((insertlen - 2) >> nbits) + 2);
  } else if (insertlen < 2114) {
    return (uint16_t)(Log2FloorNonZero(insertlen - 66) + 10);
  } else if (insertlen < 6210) {
    return 21u;
  } else if (insertlen < 22594) {
    return 22u;
  } else {
    return 23u;
  }
}
/* Maps a copy length (minimum 2) to its copy-length code, mirroring
   GetInsertLengthCode's direct / mantissa-exponent / top-range scheme. */
static BROTLI_INLINE uint16_t GetCopyLengthCode(size_t copylen) {
  if (copylen < 10) {
    return (uint16_t)(copylen - 2);
  } else if (copylen < 134) {
    uint32_t nbits = Log2FloorNonZero(copylen - 6) - 1u;
    return (uint16_t)((nbits << 1) + ((copylen - 6) >> nbits) + 4);
  } else if (copylen < 2118) {
    return (uint16_t)(Log2FloorNonZero(copylen - 70) + 12);
  } else {
    return 23u;
  }
}
/* Packs an insert-length code and a copy-length code into a single
   insert-and-copy command prefix code. The low 6 bits carry the low 3 bits
   of each code; the remainder selects the (inscode, copycode) cell,
   optionally using the "last distance" short form. */
static BROTLI_INLINE uint16_t CombineLengthCodes(
    uint16_t inscode, uint16_t copycode, BROTLI_BOOL use_last_distance) {
  uint16_t bits64 =
      (uint16_t)((copycode & 0x7u) | ((inscode & 0x7u) << 3u));
  if (use_last_distance && inscode < 8u && copycode < 16u) {
    return (copycode < 8u) ? bits64 : (bits64 | 64u);
  } else {
    /* Specification: 5 Encoding of ... (last table) */
    /* offset = 2 * index, where index is in range [0..8] */
    uint32_t offset = 2u * ((copycode >> 3u) + 3u * (inscode >> 3u));
    /* All values in specification are K * 64,
       where K = [2, 3, 6, 4, 5, 8, 7, 9, 10],
             i + 1 = [1, 2, 3, 4, 5, 6, 7, 8, 9],
             K - i - 1 = [1, 1, 3, 0, 0, 2, 0, 1, 2] = D.
       All values in D require only 2 bits to encode.
       Magic constant is shifted 6 bits left, to avoid final multiplication. */
    offset = (offset << 5u) + 0x40u + ((0x520D40u >> offset) & 0xC0u);
    return (uint16_t)(offset | bits64);
  }
}
/* Computes the combined command prefix code for the given insert and copy
   lengths and stores it into *code. */
static BROTLI_INLINE void GetLengthCode(size_t insertlen, size_t copylen,
                                        BROTLI_BOOL use_last_distance,
                                        uint16_t* code) {
  uint16_t inscode = GetInsertLengthCode(insertlen);
  uint16_t copycode = GetCopyLengthCode(copylen);
  *code = CombineLengthCodes(inscode, copycode, use_last_distance);
}
/* Base insert length for the given insert-length code. */
static BROTLI_INLINE uint32_t GetInsertBase(uint16_t inscode) {
  return kBrotliInsBase[inscode];
}
/* Number of extra bits for the given insert-length code. */
static BROTLI_INLINE uint32_t GetInsertExtra(uint16_t inscode) {
  return kBrotliInsExtra[inscode];
}
/* Base copy length for the given copy-length code. */
static BROTLI_INLINE uint32_t GetCopyBase(uint16_t copycode) {
  return kBrotliCopyBase[copycode];
}
/* Number of extra bits for the given copy-length code. */
static BROTLI_INLINE uint32_t GetCopyExtra(uint16_t copycode) {
  return kBrotliCopyExtra[copycode];
}
/* A parsed command: a run of insert_len_ literal bytes followed by a
   backward-reference copy (see the file header comment). */
typedef struct Command {
  uint32_t insert_len_;
  /* Stores copy_len in low 25 bits and copy_code - copy_len in high 7 bit. */
  uint32_t copy_len_;
  /* Stores distance extra bits. */
  uint32_t dist_extra_;
  uint16_t cmd_prefix_;
  /* Stores distance code in low 10 bits
     and number of extra bits in high 6 bits. */
  uint16_t dist_prefix_;
} Command;
/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. */
/* Initializes a Command. copylen_code_delta is the signed difference
   copy_code - copylen; it is stored in the high 7 bits of copy_len_. */
static BROTLI_INLINE void InitCommand(Command* self,
    const BrotliDistanceParams* dist, size_t insertlen,
    size_t copylen, int copylen_code_delta, size_t distance_code) {
  /* Don't rely on signed int representation, use honest casts. */
  uint32_t delta = (uint8_t)((int8_t)copylen_code_delta);
  self->insert_len_ = (uint32_t)insertlen;
  self->copy_len_ = (uint32_t)(copylen | (delta << 25));
  /* The distance prefix and extra bits are stored in this Command as if
     npostfix and ndirect were 0, they are only recomputed later after the
     clustering if needed. */
  PrefixEncodeCopyDistance(
      distance_code, dist->num_direct_distance_codes,
      dist->distance_postfix_bits, &self->dist_prefix_, &self->dist_extra_);
  /* (dist_prefix_ & 0x3FF) == 0 means the "last distance" short code. */
  GetLengthCode(
      insertlen, (size_t)((int)copylen + copylen_code_delta),
      TO_BROTLI_BOOL((self->dist_prefix_ & 0x3FF) == 0), &self->cmd_prefix_);
}
/* Initializes a literals-only command: actual copy length 0 with a stored
   copy-length-code delta of 4, and the distance prefix set just past the
   short codes. */
static BROTLI_INLINE void InitInsertCommand(Command* self, size_t insertlen) {
  self->insert_len_ = (uint32_t)insertlen;
  self->copy_len_ = 4 << 25;
  self->dist_extra_ = 0;
  self->dist_prefix_ = BROTLI_NUM_DISTANCE_SHORT_CODES;
  GetLengthCode(insertlen, 4, BROTLI_FALSE, &self->cmd_prefix_);
}
/* Reconstructs the original distance code (as passed to InitCommand) from
   the stored prefix and extra bits, inverting PrefixEncodeCopyDistance. */
static BROTLI_INLINE uint32_t CommandRestoreDistanceCode(
    const Command* self, const BrotliDistanceParams* dist) {
  /* Short codes and direct distances are stored verbatim. */
  if ((self->dist_prefix_ & 0x3FFu) <
      BROTLI_NUM_DISTANCE_SHORT_CODES + dist->num_direct_distance_codes) {
    return self->dist_prefix_ & 0x3FFu;
  } else {
    uint32_t dcode = self->dist_prefix_ & 0x3FFu;
    uint32_t nbits = self->dist_prefix_ >> 10;
    uint32_t extra = self->dist_extra_;
    uint32_t postfix_mask = (1U << dist->distance_postfix_bits) - 1U;
    /* Split the code into its high (exponent) and low (postfix) parts. */
    uint32_t hcode = (dcode - dist->num_direct_distance_codes -
                      BROTLI_NUM_DISTANCE_SHORT_CODES) >>
                     dist->distance_postfix_bits;
    uint32_t lcode = (dcode - dist->num_direct_distance_codes -
                      BROTLI_NUM_DISTANCE_SHORT_CODES) & postfix_mask;
    uint32_t offset = ((2U + (hcode & 1U)) << nbits) - 4U;
    return ((offset + extra) << dist->distance_postfix_bits) + lcode +
           dist->num_direct_distance_codes + BROTLI_NUM_DISTANCE_SHORT_CODES;
  }
}
/* Returns the distance context (0..3) derived from the command prefix. */
static BROTLI_INLINE uint32_t CommandDistanceContext(const Command* self) {
  const uint32_t group = self->cmd_prefix_ >> 6;
  const uint32_t low_bits = self->cmd_prefix_ & 7;
  const int group_uses_low_bits =
      (group == 0) || (group == 2) || (group == 4) || (group == 7);
  if (group_uses_low_bits && low_bits <= 2) {
    return low_bits;
  }
  return 3;
}
/* Actual copy length: the low 25 bits of copy_len_. */
static BROTLI_INLINE uint32_t CommandCopyLen(const Command* self) {
  return self->copy_len_ & 0x1FFFFFF;
}
/* Copy-length code value: actual length plus the stored delta. The 7-bit
   delta is sign-extended by replicating bit 6 into bit 7 before the int8
   cast. */
static BROTLI_INLINE uint32_t CommandCopyLenCode(const Command* self) {
  uint32_t modifier = self->copy_len_ >> 25;
  int32_t delta = (int8_t)((uint8_t)(modifier | ((modifier & 0x40) << 1)));
  return (uint32_t)((int32_t)(self->copy_len_ & 0x1FFFFFF) + delta);
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_COMMAND_H_ */ | c | github | https://github.com/nodejs/node | deps/brotli/c/enc/command.h |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by the deployment helper library."""
class ConfigError(Exception):
    """Top-level exception for all Deploy errors."""
class DeployEmptyConfigError(ConfigError):
    """Raised when the config file is missing or empty."""
class BadStaticReferenceError(ConfigError):
    """Raised when a static reference cannot be resolved at parse time."""
class InvalidPdError(ConfigError):
    """Raised when the PD name or specs in the configuration file are invalid."""
class InvalidVmNameError(ConfigError):
    """Raised when a node name is not a valid VM name."""
class NoClusterSectionInConfigError(ConfigError):
    """Raised when no cluster section is specified in the configuration file."""
class NoClusterTypeInConfigError(ConfigError):
    """Raised when no cluster type is specified in the configuration file."""
class NoSetupModulesInConfigError(ConfigError):
    """Raised when no setup-module list is specified in the configuration file."""
class NoProjectInConfigError(ConfigError):
    """Raised when no project is specified in the configuration file."""
class NoZoneInConfigError(ConfigError):
    """Raised when no zone is specified in the configuration file."""
class NoAdminUserInConfigError(ConfigError):
    """Raised when no administrator user is specified in the configuration file."""
class NoNetworkSectionInConfigError(ConfigError):
    """Raised when no network section is specified in the configuration file."""
class NoNetworkNameInConfigError(ConfigError):
    """Raised when no network name is specified in the configuration file."""
class NoTcpPortsInConfigError(ConfigError):
    """Raised when no TCP ports are specified in the network section of the configuration file."""
class NoUdpPortsInConfigError(ConfigError):
    """Raised when no UDP ports are specified in the network section of the configuration file."""
class InvalidNetworkPortInConfigError(ConfigError):
    """An invalid network port was specified in the configuration file."""
    # The previous docstring was copy-pasted from NoNodeTypesInConfigError
    # ("Not a single node definition was found...") and did not describe
    # this exception.
class NoNodeTypesInConfigError(ConfigError):
    """Raised when not a single node definition is found in the configuration file."""
import os
"""
All configuration for the datawake web server.
To change the configuration you should set the appropriate environment variables.
Environment variables are used to set conf to conform to standard docker practices.
REQUIRED ENVIRONMENT VARIABLES
DW_DB: database name
DW_DB_USER: database username
DW_DB_PASSWORD: database password
DW_DB_HOST: database ip address or hostname
DW_DB_PORT: database port
DW_KAFKA_CONN_POOL: comma separated list of kafka brokers ip:port,..
DW_KAFKA_PUB_TOPIC: kafka topic to publish visited urls for processing.
OPTIONAL ENVIRONMENT VARIABLES
DW_GOOGLE_CLIENT_IDS: list of client ids used for google user authentication
DW_MOCK_AUTH: If set actual user authentication is bypassed for browser plugins. (for dev / demos only)
DW_MOCK_FORENSIC_AUTH: If set actual user authentication is bypassed for forensic views. (for dev / demos only)
DW_CONN_TYPE: Determines whether mysql or impala / hbase is used to store the generated web index data. default=mysql. can be mysql, cluster-impala, or cluster-hbase
DW_IMPALA_HOSTS: Comma separated list of impala hosts (cluster only)
DW_IMPALA_PORT: impala port
DW_HBASE_HOST: hbase host name (cluster only)
DW_HBASE_NAMESPACE: hbase namespace (cluster only, default: default)
DW_HBASE_DOMAIN_ENTITIES_TABLE (cluster only, default: datawake_domain_entities_hbase)
DW_HBASE_EXTRACTED_ALL_TABLE (cluster only, default: general_extractor_web_index_hbase)
DW_HBASE_EXTRACTED_DOMAIN_TABLE (cluster only, default: domain_extractor_web_index_hbase)
DW_EXTERNAL_LINK_NAMES: Comma separated list of links names to provide for extracted features found in the domain index.
DW_EXTERNAL_LINK_VALUES: Comma separated list of links to provide for extracted features found in the domain index.
The link text may contain "$ATTR" and/or "$VALUE", which will be replaced with an extracted type and value such as "phone" and "5555555555"
"""
VERSION_NUMBER = "0.5.2-SNAPSHOT"

# Enforce the requirement that every mandatory parameter is present in the
# environment; fail fast at import time if any is missing.
REQUIRED_PARAMS = [
    'DW_DB',
    'DW_DB_USER',
    'DW_DB_PASSWORD',
    'DW_DB_HOST',
    'DW_DB_PORT',
    'DW_KAFKA_CONN_POOL',
    'DW_KAFKA_PUB_TOPIC',
]

not_found = [param for param in REQUIRED_PARAMS if param not in os.environ]
if not_found:
    raise ValueError("Datawake required environment variables not set: " + str(not_found))
# read required params
DATAWAKE_CORE_DB = {
'database': os.environ['DW_DB'],
'user': os.environ['DW_DB_USER'],
'password':os.environ['DW_DB_PASSWORD'],
'host': os.environ['DW_DB_HOST'],
'port': os.environ['DW_DB_PORT']
}
KAFKA_CONN_POOL=os.environ['DW_KAFKA_CONN_POOL']
KAFKA_PUBLISH_TOPIC=os.environ['DW_KAFKA_PUB_TOPIC']
# read optional params
CLIENT_IDS = []
if 'DW_GOOGLE_CLIENT_IDS' in os.environ:
CLIENT_IDS = os.environ['DW_GOOGLE_CLIENT_IDS'].strip().split(',')
MOCK_AUTH = 'DW_MOCK_AUTH' in os.environ
MOCK_FORENSIC_AUTH = 'DW_MOCK_FORENSIC_AUTH' in os.environ
# can be "cluster" or "mysql"
ENTITY_CONNECTION = 'mysql'
if 'DW_CONN_TYPE' in os.environ:
ENTITY_CONNECTION = os.environ['DW_CONN_TYPE'].lower()
if ENTITY_CONNECTION != 'mysql' and ENTITY_CONNECTION != 'cluster-impala' and ENTITY_CONNECTION != 'cluster-hbase':
raise ValueError("DW_CONN_TYPE must be 'mysql' or 'cluster-impala', or 'cluster-hbase' if set. ")
IMPALA_HOSTS = os.environ['DW_IMPALA_HOSTS'].strip().split(',') if 'DW_IMPALA_HOSTS' in os.environ else []
IMPALA_PORT = os.environ['DW_IMPALA_PORT'] if 'DW_IMPALA_PORT' in os.environ else '21050'
IMPALA_DB = os.environ['DW_IMPALA_DB'] if 'DW_IMPALA_DB' in os.environ else 'default'
IMPALA_DOMAIN_ENTITIES_TABLE = os.environ['DW_IMPALA_DOMAIN_ENTITIES_TABLE'] if 'DW_IMPALA_DOMAIN_ENTITIES_TABLE' in os.environ else 'datawake_domain_entities'
IMPALA_EXTRACTED_ALL_TABLE = os.environ['DW_IMPALA_EXTRACTED_ALL_TABLE'] if 'DW_IMPALA_EXTRACTED_ALL_TABLE' in os.environ else 'general_extractor_web_index'
IMPALA_EXTRACTED_DOMAIN_TABLE = os.environ['DW_IMPALA_EXTRACTED_DOMAIN_TABLE'] if 'DW_IMPALA_EXTRACTED_DOMAIN_TABLE' in os.environ else 'domain_extractor_web_index'
HBASE_HOST = os.environ['DW_HBASE_HOST'] if 'DW_HBASE_HOST' in os.environ else 'NO HBASE HOST SET'
HBASE_PORT = os.environ['DW_HBASE_PORT'] if 'DW_HBASE_PORT' in os.environ else '9090'
HBASE_NAMESPACE = os.environ['DW_HBASE_NAMESPACE'] if 'DW_HBASE_NAMESPACE' in os.environ else 'default'
HBASE_DOMAIN_ENTITIES_TABLE = os.environ['DW_HBASE_DOMAIN_ENTITIES_TABLE'] if 'DW_HBASE_DOMAIN_ENTITIES_TABLE' in os.environ else 'datawake_domain_entities_hbase'
HBASE_EXTRACTED_ALL_TABLE = os.environ['DW_HBASE_EXTRACTED_ALL_TABLE'] if 'DW_HBASE_EXTRACTED_ALL_TABLE' in os.environ else 'general_extractor_web_index_hbase'
HBASE_EXTRACTED_DOMAIN_TABLE = os.environ['DW_HBASE_EXTRACTED_DOMAIN_TABLE'] if 'DW_HBASE_EXTRACTED_DOMAIN_TABLE' in os.environ else 'domain_extractor_web_index_hbase'
#
# Link to external tools. provide a list of links in the form:
#    {'display':'display name','link':"..."}
# The link text may contain "$ATTR" and/or "$VALUE"
# which will be replaced with an extracted type and value such as "phone" and "5555555555"
#
# The original code read both variables inside a bare `except:` and mapped
# any failure (missing variable or length mismatch) to ValueError; the
# conditions are now checked explicitly instead of via a blanket handler.
EXTERNAL_LINKS = []
if 'DW_EXTERNAL_LINK_NAMES' in os.environ or 'DW_EXTERNAL_LINK_VALUES' in os.environ:
    if 'DW_EXTERNAL_LINK_NAMES' not in os.environ or 'DW_EXTERNAL_LINK_VALUES' not in os.environ:
        raise ValueError("if DW_LINK_NAMES or DW_LINK_VALUES are set, both must be set and of equal length")
    linkNames = os.environ['DW_EXTERNAL_LINK_NAMES'].strip().split(',')
    linkValues = os.environ['DW_EXTERNAL_LINK_VALUES'].strip().split(',')
    if len(linkNames) != len(linkValues):
        raise ValueError("if DW_LINK_NAMES or DW_LINK_VALUES are set, both must be set and of equal length")
    for display_name, link_value in zip(linkNames, linkValues):
        EXTERNAL_LINKS.append({'display': display_name, 'link': link_value})
import { getPreviewPostBySlug } from "../../lib/graphcms";
export default async function handler(req, res) {
  // This secret should only be known to this API route and the CMS.
  const { secret, slug } = req.query;

  // Reject the request unless the shared secret matches and a slug was given.
  if (secret !== process.env.GRAPHCMS_PREVIEW_SECRET || !slug) {
    return res.status(401).json({ message: "Invalid token" });
  }

  // Ask the headless CMS whether the provided slug exists.
  const post = await getPreviewPostBySlug(slug);

  // Don't enable preview mode for unknown slugs.
  if (!post) {
    return res.status(401).json({ message: "Invalid slug" });
  }

  // Enable Draft Mode by setting the cookie.
  res.setDraftMode({ enable: true });

  // Redirect to the fetched post's own path — not req.query.slug — to avoid
  // open-redirect vulnerabilities.
  res.writeHead(307, { Location: `/posts/${post.slug}` });
  res.end();
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.