code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
import datetime
import os
import random
from typing import Tuple, Union, TypeVar, List, Callable
import libnacl.secret
from base58 import b58decode
from common.serializers.serialization import serialize_msg_for_signing
from plenum.common.types import f
from plenum.common.util import isHex, cryptonymToHex
from common.error import error
from stp_core.crypto.nacl_wrappers import Verifier
def getMsgWithoutSig(msg, sigFieldName=f.SIG.nm):
    """Return a shallow copy of *msg* with its signature field removed.

    :param msg: mapping holding the signed message fields
    :param sigFieldName: name of the signature field to drop
    :return: a new dict containing every entry except the signature
    """
    return {field: value for field, value in msg.items()
            if field != sigFieldName}
def verifySig(identifier, signature, msg) -> bool:
    """Verify *signature* over *msg* for the given *identifier*.

    :param identifier: verification key, either already hex-encoded or a
        cryptonym converted via ``cryptonymToHex``
    :param signature: base58-encoded signature string
    :param msg: message serialized with ``serialize_msg_for_signing``
        before verification
    :return: result of ``Verifier.verify`` on the decoded signature
    """
    # Normalize the identifier to hex before constructing the verifier.
    key = cryptonymToHex(identifier) if not isHex(
        identifier) else identifier
    ser = serialize_msg_for_signing(msg)
    # NOTE: despite the name, the value is base58- (not base64-) decoded below.
    b64sig = signature.encode('utf-8')
    sig = b58decode(b64sig)
    vr = Verifier(key)
    return vr.verify(sig, ser)
def getSymmetricallyEncryptedVal(val, secretKey: Union[str, bytes] = None) -> \
        Tuple[str, str]:
    """
    Encrypt the provided value with symmetric encryption

    :param val: the value to encrypt; a str is UTF-8 encoded first
    :param secretKey: Optional key, if provided should be either in hex or bytes
    :return: Tuple of the encrypted value and secret key encoded in hex
    """
    if isinstance(val, str):
        val = val.encode("utf-8")
    if secretKey:
        if isHex(secretKey):
            # Hex string key -> raw bytes.
            secretKey = bytes(bytearray.fromhex(secretKey))
        elif not isinstance(secretKey, bytes):
            # error() is presumably expected to raise here; otherwise the
            # unconverted key reaches SecretBox below -- TODO confirm.
            error("Secret key must be either in hex or bytes")
        box = libnacl.secret.SecretBox(secretKey)
    else:
        # No key supplied: SecretBox generates a fresh random one.
        box = libnacl.secret.SecretBox()
    return box.encrypt(val).hex(), box.sk.hex()
def getSymmetricallyDecryptedVal(val, secretKey: Union[str, bytes]) -> str:
    """Decrypt *val* with the given symmetric key.

    :param val: ciphertext; a hex string is unhexlified, any other str is
        UTF-8 encoded, bytes are used as-is
    :param secretKey: key in hex, bytes, or plain str (UTF-8 encoded)
    :return: the decrypted plaintext decoded to str
    """
    if isHex(val):
        val = bytes(bytearray.fromhex(val))
    elif isinstance(val, str):
        val = val.encode("utf-8")
    if isHex(secretKey):
        secretKey = bytes(bytearray.fromhex(secretKey))
    elif isinstance(secretKey, str):
        secretKey = secretKey.encode()
    box = libnacl.secret.SecretBox(secretKey)
    return box.decrypt(val).decode()
def dateTimeEncoding(obj):
    """JSON ``default`` helper: encode datetimes as integer POSIX timestamps.

    :param obj: object a JSON encoder could not serialize
    :return: epoch seconds (int) when *obj* is a datetime
    :raises TypeError: for any other type
    """
    if isinstance(obj, datetime.datetime):
        # datetime.timestamp() replaces strftime('%s'): '%s' is a
        # non-standard glibc extension (absent on Windows) and silently
        # ignores the tzinfo of aware datetimes, assuming local time.
        # timestamp() is portable and honours tzinfo; for naive datetimes
        # both interpret the value as local time.
        return int(obj.timestamp())
    raise TypeError('Not sure how to serialize %s' % (obj,))
def getNonce(length=32):
    """Return a random nonce of *length* lowercase hex characters."""
    hex_digits = "0123456789abcdef"
    return "".join(random.choice(hex_digits) for _ in range(length))
def get_reply_if_confirmed(client, identifier, request_id: int):
    """Look up a request's reply and error status.

    :return: ``(reply, None)`` when the request is confirmed,
        ``(None, None)`` when it is still pending with no recorded errors,
        ``(reply, reason)`` when an error was recorded for it.
    """
    reply, status = client.getReply(identifier, request_id)
    if status == 'CONFIRMED':
        return reply, None
    _, errors = client.reqRepStore.getAllReplies(identifier, request_id)
    if errors:
        # Report an arbitrary recorded error (sender is discarded).
        _, reason = errors.popitem()
        return reply, reason
    return None, None
# TODO: Should have a timeout, should not have kwargs
def ensureReqCompleted(
        loop,
        reqKey,
        client,
        clbk=None,
        pargs=None,
        kwargs=None,
        cond=None):
    """Poll until the request identified by *reqKey* resolves (reply or
    error, or *cond* becomes true), then invoke *clbk* if provided."""
    reply, err = get_reply_if_confirmed(client, *reqKey)
    unresolved = err is None and reply is None and (cond is None or not cond())
    if unresolved:
        # Nothing yet -- check again shortly.
        loop.call_later(.2, ensureReqCompleted, loop,
                        reqKey, client, clbk, pargs, kwargs, cond)
        return
    if clbk is None:
        return
    # TODO: Do something which makes reply and error optional in the
    # callback.
    # TODO: This is kludgy, but will be resolved once we move away from
    # this callback pattern
    extra_args = pargs if pargs is not None else ()
    extra_kwargs = kwargs if kwargs is not None else {}
    clbk(reply, err, *extra_args, **extra_kwargs)
def getNonceForProof(nonce):
    """Interpret a hex-encoded nonce as an integer."""
    return int(nonce, base=16)
T = TypeVar('T')


def getIndex(predicateFn: Callable[[T], bool], items: List[T]) -> int:
    """Return the index of the first item satisfying *predicateFn*.

    :param predicateFn: predicate applied to each item in turn
    :param items: candidate items
    :return: index of the first match, or -1 when nothing matches
    """
    for position, element in enumerate(items):
        if predicateFn(element):
            return position
    return -1
def compose_cmd(cmd):
    """Join command parts into one string on POSIX; Windows keeps the list."""
    return cmd if os.name == 'nt' else ' '.join(cmd)
def invalidate_config_caches():
    """Drop the cached config singletons so the next getConfig() reloads."""
    import stp_core.common.config.util
    import plenum.common.config_util
    import indy_common.config_util
    # All 3 references must be nullified because all they reference
    # the same object due to specific logic of getConfig methods
    stp_core.common.config.util.CONFIG = None
    plenum.common.config_util.CONFIG = None
    indy_common.config_util.CONFIG = None
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Class definition:
# NordugridATLASSiteInformation
# This class is the Nordugrid-ATLAS site information class inheriting from ATLASSiteInformation
# Instances are generated with SiteInformationFactory via pUtil::getSiteInformation()
# Implemented as a singleton class
# http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern
# import relevant python/pilot modules
import os
import commands
import SiteMover
from SiteInformation import SiteInformation # Main site information class
from ATLASSiteInformation import ATLASSiteInformation # Main site information class
from pUtil import tolog # Logging method that sends text to the pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from PilotErrors import PilotErrors # Error codes
class NordugridATLASSiteInformation(ATLASSiteInformation):
    """Nordugrid-ATLAS site information singleton.

    Instances are generated with SiteInformationFactory via
    pUtil::getSiteInformation(); __new__ ensures a single shared instance.
    """

    # private data members
    __experiment = "Nordugrid-ATLAS"  # name returned by getExperiment()
    __instance = None                 # cached singleton instance

    # Required methods
    def __init__(self):
        """ Default initialization """
        pass

    def __new__(cls, *args, **kwargs):
        """ Override the __new__ method to make the class a singleton """
        # NOTE(review): super(ATLASSiteInformation, cls) skips
        # ATLASSiteInformation.__new__ in the MRO -- presumably to bypass
        # the parent's own singleton machinery; confirm this is intentional.
        if not cls.__instance:
            cls.__instance = super(ATLASSiteInformation, cls).__new__(cls, *args, **kwargs)
        return cls.__instance

    def getExperiment(self):
        """ Return a string with the experiment name """
        return self.__experiment
if __name__ == "__main__":
    # Manual smoke test: instantiate the singleton and query the Tier-1
    # queue for the CERN cloud.
    os.environ['PilotHomeDir'] = os.getcwd()

    si = NordugridATLASSiteInformation()
    tolog("Experiment: %s" % (si.getExperiment()))

    cloud = "CERN"
    # getTier1Queue is inherited (not defined in this file); the branch
    # below treats an empty string as "no queue found".
    queuename = si.getTier1Queue(cloud)
    if queuename != "":
        tolog("Cloud %s has Tier-1 queue %s" % (cloud, queuename))
    else:
        tolog("Failed to find a Tier-1 queue name for cloud %s" % (cloud))
|
unknown
|
codeparrot/codeparrot-clean
| ||
import pandas as pd
import numpy as np
import pickle
import os
import math
import glob
from include.dataset_fnames import generate_station_data_fname, generate_data_fname, generate_response_data_fname, train_categorical_onehot_filename
from random import shuffle
from datetime import datetime
def load_and_compress_data(dirname, use_categoric_features=False):
    """Convert chunked train CSVs in *dirname* into compressed .npz files.

    Column names come from the canonical train files; each per-chunk CSV
    ("train_numeric_*", optionally joined with the matching
    "train_categorical_*" chunk) is saved via np.savez_compressed.
    NOTE: Python 2 source (print statement below).

    :param dirname: directory containing the chunked CSV files
    :param use_categoric_features: also join the one-hot categorical chunks
    """
    fname = generate_data_fname(sample_type='train', data_type='numeric')
    numeric_columns = pd.read_csv(fname, nrows=2).columns
    numeric_fnames = sorted(glob.glob1(dirname, "train_numeric_*"))
    if use_categoric_features:
        fname = train_categorical_onehot_filename
        categoric_columns = pd.read_csv(fname, nrows=2).columns
        categoric_fnames = sorted(glob.glob1(dirname, "train_categorical_*"))
    for list_index in range(len(numeric_fnames)):
        numeric_fname = os.path.join(dirname, numeric_fnames[list_index])
        numeric_df = pd.read_csv(numeric_fname, names=numeric_columns, index_col='Id')
        # The label column is not part of the features; drop it.
        del numeric_df['Response']
        zfname = "train_numeric_0_" + str(list_index).zfill(3) + ".npz"
        # print numeric_fname
        # print zfname
        if use_categoric_features:
            # Chunks are paired by sorted-list position -- assumes numeric
            # and categorical files were chunked identically; TODO confirm.
            categoric_fname = os.path.join(dirname, categoric_fnames[list_index])
            categoric_df = pd.read_csv(categoric_fname, names=categoric_columns, index_col='Id')
            numeric_df = numeric_df.join(categoric_df, how='inner')
            del categoric_df
            zfname = "train_numeric+categoric_0_" + str(list_index).zfill(3) + ".npz"
            # print categoric_fname
            # print zfname
        print "Saving:", zfname
        np.savez_compressed(zfname, data=numeric_df.values)
        # Free the chunk before loading the next one.
        del numeric_df
if __name__ == '__main__':
    # Compress the 60000-row chunk directory, including categorical features.
    load_and_compress_data('bs60000', use_categoric_features=True)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB
from django.core.management import call_command
from django.db import connection
from django.test import override_settings, override_system_checks, TransactionTestCase
class MigrateTests(TransactionTestCase):
    """
    Tests running the migrate command in Geodjango.
    """
    # Restrict installed apps so only the gis migrations are in play.
    available_apps = ["django.contrib.gis"]

    def get_table_description(self, table):
        """Return the introspected column description of *table*."""
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)

    def assertTableExists(self, table):
        """Assert that *table* is present in the current database."""
        with connection.cursor() as cursor:
            self.assertIn(table, connection.introspection.get_table_list(cursor))

    def assertTableNotExists(self, table):
        """Assert that *table* is absent from the current database."""
        with connection.cursor() as cursor:
            self.assertNotIn(table, connection.introspection.get_table_list(cursor))

    @skipUnless(HAS_SPATIAL_DB, "Spatial db is required.")
    @override_system_checks([])
    @override_settings(MIGRATION_MODULES={"gis": "django.contrib.gis.tests.gis_migrations.migrations"})
    def test_migrate_gis(self):
        """
        Tests basic usage of the migrate command when a model uses Geodjango
        fields. Regression test for ticket #22001:
        https://code.djangoproject.com/ticket/22001
        """
        # Make sure no tables are created
        self.assertTableNotExists("migrations_neighborhood")
        self.assertTableNotExists("migrations_household")
        # Run the migrations to 0001 only
        call_command("migrate", "gis", "0001", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("gis_neighborhood")
        self.assertTableExists("gis_household")
        # Unmigrate everything
        call_command("migrate", "gis", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("gis_neighborhood")
        self.assertTableNotExists("gis_household")
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
import decimal
import types
import unicodedata
import urllib
def DisplayFriendlySize(bytes):
    """DisplayFriendlySize -- turn a number of bytes into a nice string"""
    # Python 2 only: types.LongType / types.IntType do not exist in py3.
    # Non-numeric input is reported rather than raising.
    t = type(bytes)
    if t != types.LongType and t != types.IntType and t != decimal.Decimal:
        return 'NotANumber(%s=%s)' %(t, bytes)
    if bytes < 1024:
        return '%d bytes' % bytes
    if bytes < 1024 * 1024:
        return '%d kb (%d bytes)' %((bytes / 1024), bytes)
    if bytes < 1024 * 1024 * 1024:
        return '%d mb (%d bytes)' %((bytes / (1024 * 1024)), bytes)
    return '%d gb (%d bytes)' %((bytes / (1024 * 1024 * 1024)), bytes)
def Normalize(value):
    """Return an ASCII approximation of *value* (combining marks dropped)."""
    # NFKD decomposes characters into base + combining marks; encoding to
    # ascii with 'ignore' then discards the marks. Python 2 only (unicode).
    normalized = unicodedata.normalize('NFKD', unicode(value))
    normalized = normalized.encode('ascii', 'ignore')
    return normalized
def read_remote_lines(url):
    """Generator yielding the lines fetched from *url* (Python 2 urllib).

    Reads the response in 100-byte chunks, yielding complete lines as they
    become available; a trailing partial line is yielded last.
    """
    remote = urllib.urlopen(url)
    data = ''
    while True:
        d = remote.read(100)
        if not d:
            break
        data += d
        if data.find('\n') != -1:
            elems = data.split('\n')
            # Yield every complete line; keep the partial tail in data.
            for line in elems[:-1]:
                yield line
            data = elems[-1]
    if data:
        yield data
def read_remote_file(url):
    """Fetch *url* and return its body as one newline-joined string."""
    return '\n'.join(read_remote_lines(url))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# The MIT License (MIT)
#
# Copyright (c) 2015 Leon Jacobs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
''' A Chat Logger '''
from hogar.static import values as static_values
from hogar.Models.Logger import Logger
import logging
logger = logging.getLogger(__name__)
def enabled ():
    '''
        Enabled

        Report whether this plugin is active. Returning
        False would make the framework skip the plugin
        entirely.

        --
        @return bool
    '''
    return True
def applicable_types ():
    '''
        Applicable Types

        Returns the type of messages this plugin is for.
        This logger handles every known message type.
        See: hogar.static.values

        --
        @return list
    '''
    return static_values.possible_message_types
def commands ():
    '''
        Commands

        Text plugins trigger on the commands returned here;
        the wildcard '*' matches every command. Non-text
        message types would return an empty list instead.

        --
        @return list
    '''
    # Log everything.
    return ['*']
def should_reply ():
    '''
        Should Reply

        A logger is passive: it never sends a reply back to
        the sender of the message that triggered it.

        --
        @return bool
    '''
    return False
def reply_type ():
    '''
        Reply Type

        The kind of reply this plugin would send if it did
        reply. Optional; see hogar.static.values for the
        available types.

        --
        @return str
    '''
    return 'text'
def _process_from (message, record):
'''
Process From
Take a Telegram Message payload as well as a Hogar
Logger Model and populate the from_* information
--
@param message:dict The message sent by the user
@param record:object The hogar.Models.Logger.Logger object
@return str
'''
# from_username = CharField(null = True, max_length = 250)
# from_first_name = CharField(null = True, max_length = 250)
# from_last_name = CharField(null = True, max_length = 250)
# from_id = IntegerField()
# u'from':{
# u'username':u'username',
# u'first_name':u'first',
# u'last_name':u'last',
# u'id':12345
# },
if 'username' in message['from']:
record.from_username = message['from']['username']
if 'first_name' in message['from']:
record.from_first_name = message['from']['first_name']
if 'last_name' in message['from']:
record.from_last_name = message['from']['last_name']
if 'id' in message['from']:
record.from_id = message['from']['id']
return record
def _process_chat (message, record):
'''
Process Chat
Take a Telegram Message payload as well as a Hogar
Logger Model and populate the chat_* information
--
@param message:dict The message sent by the user
@param record:object The hogar.Models.Logger.Logger object
@return str
'''
# chat_title = CharField(null = True, max_length = 250)
# chat_id = IntegerField()
# chat_username= CharField(null = True, max_length = 250)
# chat_first_name = CharField(null = True, max_length = 250)
# chat_last_name = CharField(null = True, max_length = 250)
# u'chat':{
# u'username':u'dude',
# u'first_name':u'first',
# u'last_name':u'last',
# u'id':12345
# }
# u'chat':{
# u'id':-12345,
# u'title':u'A Group Chat'
# }
if 'title' in message['chat']:
record.chat_title = message['chat']['title']
if 'id' in message['chat']:
record.chat_id = message['chat']['id']
if 'username' in message['chat']:
record.chat_username = message['chat']['username']
if 'first_name' in message['chat']:
record.chat_first_name = message['chat']['first_name']
if 'last_name' in message['chat']:
record.chat_last_name = message['chat']['last_name']
return record
def _process_file_id (message, record):
'''
Process File ID
Take a Telegram Message payload as well as a Hogar
Logger Model and populate the file_id information
--
@param message:dict The message sent by the user
@param record:object The hogar.Models.Logger.Logger object
@return str
'''
# If we are a photo:
# u'photo':[
# {
# u'width':90,
# u'file_size':1458,
# u'file_id':u'123-AEAAQI',
# u'height':90
# }
# ],
if 'photo' in message:
record.file_id = message['photo'][0]['file_id']
# u'sticker':{
# u'width':482,
# u'height':512,
# u'thumb':{
# u'width':84,
# u'file_size':2658,
# u'file_id':u'123-AEAAQI',
# u'height':90
# },
# u'file_id':u'BQADBAADOAADyIsGAAGV20QAAasOeuMC',
# u'file_size':43636
# },
elif 'sticker' in message:
record.file_id = message['sticker']['file_id']
# u'audio':{
# u'duration':1,
# u'file_id':u'123-AEAAQI',
# u'mime_type':u'audio/ogg',
# u'file_size':9162
# },
elif 'audio' in message:
record.file_id = message['audio']['file_id']
# u'document':{
# u'file_name':u'test.js',
# u'file_id':u'123-AEAAQI',
# u'thumb':{},
# u'mime_type':u'application/javascript',
# u'file_size':34728
# },
elif 'document' in message:
record.file_id = message['document']['file_id']
# return
return record
def run (message):
    '''
        Run

        Persist an incoming Telegram message to the Logger
        table. Nothing is returned, so no reply is ever sent
        back to the user.

        --
        @param message:dict The message sent by the user
        @return None
    '''
    # Search for the message type
    tg_type = [message_type for message_type in static_values.possible_message_types \
        if message_type in message]
    # Grab the first entry in the above list.
    # Should never have more than one anyways.
    tg_type = tg_type[0]
    # Check if we already know about this message.
    # Honestly can't think of a case where this
    # will actually happen. Nonetheless, lets
    # check and warn.
    try:
        Logger.get(Logger.message_id == message['message_id'])
        logger.warning('Message {id} already exists in the database.'.format(
            id = message['message_id']
        ))
        return
    except Logger.DoesNotExist:
        # Nope. Create it!
        logger.debug('Storing message id {id} which is a {tg_type} message'.format(
            id = message['message_id'],
            tg_type = tg_type
        ))
    # Start a new Logger instance
    l = Logger(message_id = message['message_id'])
    # Set some fields that are always applicable
    # to any message type
    l.message_type = tg_type
    l.telegram_date = message['date']
    # Populate the 'from' details
    l = _process_from(message, l)
    # Populate the 'chat' details. Chat is the actual
    # person/room the message came from
    l = _process_chat(message, l)
    # Process any potential file_id's
    l = _process_file_id(message, l)
    # If there is text, add that too
    l.text = message['text'] if 'text' in message else None
    # Aaand save.
    l.save()
    return
|
unknown
|
codeparrot/codeparrot-clean
| ||
pr: 140684
summary: "MMR Command: Grammar and Logical Plan"
area: ES|QL
type: feature
issues: []
|
unknown
|
github
|
https://github.com/elastic/elasticsearch
|
docs/changelog/140684.yaml
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from report import report_sxw
from openerp.tools.translate import _
#
# Use period and Journal for selection or resources
#
class report_assert_account(report_sxw.rml_parse):
    """RML parser for accounting assert-test reports (Python 2 / OpenERP).

    Exposes execute_code() to the report template so each test's Python
    snippet can be evaluated and its outcome rendered.
    """
    def __init__(self, cr, uid, name, context):
        super(report_assert_account, self).__init__(cr, uid, name, context=context)
        # Names made available inside the RML template.
        self.localcontext.update( {
            'time': time,
            'datetime': datetime,
            'execute_code': self.execute_code,
        })

    def execute_code(self, code_exec):
        """Execute a test snippet and return its outcome as a list of strings.

        The snippet must store its outcome in a variable named ``result``
        and may set ``column_order`` to control how dict rows are rendered.
        An empty result list is reported as a passed test.
        """
        def reconciled_inv():
            """
            returns the list of invoices that are set as reconciled = True
            """
            return self.pool.get('account.invoice').search(self.cr, self.uid, [('reconciled','=',True)])
        def order_columns(item, cols=None):
            """
            This function is used to display a dictionary as a string, with its columns in the order chosen.

            :param item: dict
            :param cols: list of field names
            :returns: a list of tuples (fieldname: value) in a similar way that would dict.items() do except that the
                returned values are following the order given by cols
            :rtype: [(key, value)]
            """
            if cols is None:
                cols = item.keys()
            return [(col, item.get(col)) for col in cols if col in item.keys()]
        localdict = {
            'cr': self.cr,
            'uid': self.uid,
            'reconciled_inv': reconciled_inv, #specific function used in different tests
            'result': None, #used to store the result of the test
            'column_order': None, #used to choose the display order of columns (in case you are returning a list of dict)
        }
        # Python 2 exec-statement: the snippet mutates localdict in place.
        exec code_exec in localdict
        result = localdict['result']
        column_order = localdict.get('column_order', None)
        if not isinstance(result, (tuple, list, set)):
            # Wrap scalars so the rendering below is uniform.
            result = [result]
        if not result:
            result = [_('The test was passed successfully')]
        else:
            def _format(item):
                # Dicts render as "key: value" pairs in the chosen order.
                if isinstance(item, dict):
                    return ', '.join(["%s: %s" % (tup[0], tup[1]) for tup in order_columns(item, column_order)])
                else:
                    return item
            result = [_(_format(rec)) for rec in result]
        return result
# Register the report against the accounting.assert.test model.
report_sxw.report_sxw('report.account.test.assert.print', 'accounting.assert.test', 'addons/account_test/report/account_test.rml', parser=report_assert_account, header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
import re
import urllib
from HTMLParser import HTMLParser
class BlogAttachmentPageParser(HTMLParser):
    """HTMLParser used to extract the url of Bing images from a Blog Post Attachment Page from www.iorise.com
    (e.g.: http://www.iorise.com/blog/?attachment_id=44)"""
    def __init__(self, result_list):
        """ Constructor: Initialize parser. Matching hrefs are appended to result_list. """
        HTMLParser.__init__(self)
        self.result_list = result_list
        # Regex used to validate the href attribute of a tags
        self.href_chk = re.compile('^http://www[.]iorise[.]com/blog/wp-content/uploads/20[0-9]{2}/[01][0-9]/.+[.](jpg|jpeg)$')

    def handle_starttag(self, tag, attrs):
        """ Method called when the parser encounter a start tag. """
        # The url to the image will be in an anchor tag
        if tag == 'a':
            # Check if we are currently at the right a tag
            if self.validate_a_tag(attrs):
                for attr_name, attr_value in attrs:
                    if attr_name == 'href':
                        self.result_list.append(attr_value)

    def validate_a_tag(self, attrs):
        """ Method called to check if a <a> tag and its attributes correspond to what we're looking for.

        Accepts only tags whose sole attribute is a matching href; any
        other attribute (including rel) rejects the tag.
        """
        href_ok = False
        rel_ok = False  # NOTE(review): never set -- leftover from the disabled rel check below
        for attribute_name, value in attrs:
            # Check the href
            if attribute_name == 'href':
                if self.href_chk.match(value):
                    href_ok = True
            # Check the rel
            elif attribute_name == 'rel':
                #if value == 'attachment':
                #    rel_ok = True
                # NOTE(review): any rel attribute currently rejects the tag
                # outright; confirm this is intentional given the
                # commented-out check above.
                return False
            # The tag should not contain any more attributes
            else:
                return False
        return href_ok
class BlogDayPageParser(HTMLParser):
    """HTMLParser used to extract the url of attachment page containing the Bing images from a Day Page from
    www.iorise.com (e.g.: http://www.iorise.com/blog/?m=20121125)"""
    def __init__(self, result_list):
        """ Constructor: Initialize parser. Matching hrefs are appended to result_list. """
        HTMLParser.__init__(self)
        self.result_list = result_list
        # Regex used to validate the href attribute of a tags
        self.href_chk = re.compile('^http://www[.]iorise[.]com/(blog/)?[?]attachment_id=[0-9]+$')
        # Regex used to validate the rel attribute of a tags
        self.rel_chk = re.compile('^attachment wp-att-[0-9]+$')

    def handle_starttag(self, tag, attrs):
        """ Method called when the parser encounter a start tag. """
        # The url we are looking for will be in an <a> tag
        if tag == 'a':
            # Check if we are currently at the right a tag
            if self.validate_a_tag(attrs):
                for attr_name, attr_value in attrs:
                    if attr_name == 'href':
                        self.result_list.append(attr_value)

    def validate_a_tag(self, attrs):
        """ Method called to check if a <a> tag and its attributes correspond to what we're looking for.

        Requires both a matching href and a matching rel attribute;
        any other attribute rejects the tag.
        """
        href_ok = False
        rel_ok = False
        for attribute_name, value in attrs:
            # Check the href
            if attribute_name == 'href':
                if self.href_chk.match(value):
                    href_ok = True
            # Check the rel
            elif attribute_name == 'rel':
                if self.rel_chk.match(value):
                    rel_ok = True
            # The tag should not contain any more attributes
            else:
                return False
        return href_ok and rel_ok
def extract_all_image_urls(date_to_extract):
    """Return the Bing image URLs posted on iorise for the given day.

    Fetches the blog's day page, collects its attachment-page links, then
    scrapes each attachment page for image URLs. Python 2 (urllib.urlopen).

    :param date_to_extract: object with year/month/day attributes
    :return: list of image URL strings; [] when the day page is unreachable
    """
    url = "http://www.iorise.com/blog/?m={year}{month:02}{day:02}".format(year=date_to_extract.year,
                                                                          month=date_to_extract.month,
                                                                          day=date_to_extract.day)
    try:
        page = urllib.urlopen(url)
    except Exception:
        # Best effort: an unreachable day page yields no results.
        # (Was a bare except, which also swallowed SystemExit and
        # KeyboardInterrupt.)
        return []
    # Extract attachment pages from day page
    attachment_pages_url = []
    day_page_parser = BlogDayPageParser(attachment_pages_url)
    day_page_parser.feed(page.read().decode('UTF-8'))
    all_image_urls = []
    # For each attachment page, extract the image urls
    for page_url in attachment_pages_url:
        try:
            attachment_page = urllib.urlopen(page_url)
        except Exception:
            # Skip unreachable attachment pages; keep processing the rest.
            continue
        image_urls = []
        parser = BlogAttachmentPageParser(image_urls)
        parser.feed(attachment_page.read().decode('UTF-8'))
        all_image_urls += image_urls
    return all_image_urls
|
unknown
|
codeparrot/codeparrot-clean
| ||
package dockerfile
import (
"os"
"path/filepath"
"testing"
)
// createTestTempFile creates a temporary file within dir with specific contents and permissions.
// When an error occurs, it terminates the test.
func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string {
	t.Helper() // report failures at the caller's line, not inside this helper
	filePath := filepath.Join(dir, filename)
	if err := os.WriteFile(filePath, []byte(contents), perm); err != nil {
		t.Fatalf("Error when creating %s file: %s", filename, err)
	}
	return filePath
}
// createTestSymlink creates a symlink file within dir which points to oldname.
// When an error occurs, it terminates the test.
func createTestSymlink(t *testing.T, dir, filename, oldname string) string {
	t.Helper() // report failures at the caller's line, not inside this helper
	filePath := filepath.Join(dir, filename)
	if err := os.Symlink(oldname, filePath); err != nil {
		t.Fatalf("Error when creating %s symlink to %s: %s", filename, oldname, err)
	}
	return filePath
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/builder/dockerfile/utils_test.go
|
'''
Created on Feb 10, 2016

@author: Darren
'''
from SimpleDatabase import SimpleDatabase
import os
import unittest
from random import random
class Test(unittest.TestCase):
    """Unit tests for SimpleDatabase command processing."""

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.testClass=SimpleDatabase()
        # Silence per-command output during the bulk random runs.
        self.testClass.print_flag=False

    def test_robust(self):
        '''the program should not crash for any valid inputs'''
        for _ in range(100):
            N=10000
            # Maps each command name to its number of arguments.
            commands_d={"BEGIN":0,"END":0,"ROLLBACK":0,"COMMIT":0,"SET":2,"UNSET":1,"GET":1,"NUMEQUALTO":1}
            commands=["BEGIN","ROLLBACK","COMMIT","SET","UNSET","GET","NUMEQUALTO","END"]
            for _ in range(N):
                # Pick any command except the final END (last list entry).
                command=commands[int(random()*(len(commands)-1))]
                if command=="NUMEQUALTO":#special case: argument is a value, not a key
                    value=int(random()*100)
                    command+=" "+str(value)
                elif commands_d[command]==1:
                    # Single-argument commands take a key A..Z.
                    key=chr(ord("A")+int(random()*26))
                    command+=" "+str(key)
                elif commands_d[command]==2:
                    # SET takes a key and a value.
                    key=chr(ord("A")+int(random()*26))
                    value=int(random()*100)
                    command+=" "+str(key) +" "+str(value)
                self.testClass.process_command(command)
            command=commands[-1]
            # The test asserts that processing END raises.
            with self.assertRaises(Exception):
                self.testClass.process_command(command)
            self.testClass.clear_data()

    # def test_robust(self):
    #     file_name="test3.in"
    #     for _ in range(10000):
    #         N=20
    #         self.generator_test_case(N, file_name)
    #         self.testClass.run_from_file(file_name)
    #         self.testClass.clear_data()

    def generator_test_case(self,N,file_name):
        """Write N random valid commands (plus a final END) to file_name."""
        commands_d={"BEGIN":0,"END":0,"ROLLBACK":0,"COMMIT":0,"SET":2,"UNSET":1,"GET":1,"NUMEQUALTO":1}
        commands=["BEGIN","ROLLBACK","COMMIT","SET","UNSET","GET","NUMEQUALTO","END"]
        with open(file_name,"w") as file:
            for _ in range(N):
                command=commands[int(random()*(len(commands)-1))]
                if command=="NUMEQUALTO":
                    value=int(random()*100)
                    file.write(" ".join([command,str(value)])+"\n")
                elif commands_d[command]==1:
                    key=chr(ord("A")+int(random()*26))
                    file.write(" ".join([command,key])+"\n")
                elif commands_d[command]==2:
                    key=chr(ord("A")+int(random()*26))
                    value=int(random()*100)
                    file.write(" ".join([command,key,str(value)])+"\n")
                else:
                    file.write(" ".join([command])+"\n")
            file.write(commands[-1])

    def load_result(self,file_name):
        """Read the expected-output lines (newline-stripped) from file_name."""
        # NOTE(review): if open() fails, `res` is never bound and the
        # return below raises NameError; the file handle is also never
        # closed. Consider a with-block and re-raising.
        try:
            file=open(file_name, "r")
            res=list(map(lambda x:x.strip("\n"),file.readlines()))
        except Exception as e:
            print("Error reading file: "+file_name)
        return res

    def test_valid_input(self):
        """Run every .in fixture and compare against its .out file."""
        path="DBTestData/valid"
        tests=[x for x in os.listdir(path) if os.path.splitext(x)[1]=='.in']
        for test in tests:
            actual_result=self.testClass.run_from_file(path+'/'+test)
            result_file_name=path+'/'+test[:-3]+'.out'
            expected_result=self.load_result(result_file_name)
            self.assertEqual(actual_result, expected_result, "Failed with input: "+test)
            self.testClass.clear_data()
if __name__ == "__main__":
    # Run the whole suite when executed directly.
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* MIT License
*
* Copyright (c) 1998 Massachusetts Institute of Technology
* Copyright (c) 2007 Daniel Stenberg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_private.h"
#ifdef HAVE_SYS_PARAM_H
# include <sys/param.h>
#endif
#ifdef HAVE_NETINET_IN_H
# include <netinet/in.h>
#endif
#ifdef HAVE_NETDB_H
# include <netdb.h>
#endif
#ifdef HAVE_ARPA_INET_H
# include <arpa/inet.h>
#endif
#if defined(USE_WINSOCK)
# if defined(HAVE_IPHLPAPI_H)
# include <iphlpapi.h>
# endif
# if defined(HAVE_NETIOAPI_H)
# include <netioapi.h>
# endif
#endif
#include "ares_inet_net_pton.h"
#if defined(USE_WINSOCK)
# define WIN_NS_9X "System\\CurrentControlSet\\Services\\VxD\\MSTCP"
# define WIN_DNSCLIENT "Software\\Policies\\Microsoft\\System\\DNSClient"
# define WIN_NT_DNSCLIENT \
"Software\\Policies\\Microsoft\\Windows NT\\DNSClient"
# define NAMESERVER "NameServer"
# define DHCPNAMESERVER "DhcpNameServer"
# define SEARCHLIST_KEY L"SearchList"
# define PRIMARYDNSSUFFIX_KEY L"PrimaryDNSSuffix"
# define INTERFACES_KEY "Interfaces"
# define DOMAIN_KEY L"Domain"
# define DHCPDOMAIN_KEY L"DhcpDomain"
/*
* get_REG_SZ()
*
* Given a 'hKey' handle to an open registry key and a 'leafKeyName' pointer
 * to the name of the registry leaf key to be queried, fetch its string
* value and return a pointer in *outptr to a newly allocated memory area
* holding it as a null-terminated string.
*
* Returns 0 and nullifies *outptr upon inability to return a string value.
*
* Returns 1 and sets *outptr when returning a dynamically allocated string.
*
* Supported on Windows NT 3.5 and newer.
*/
/* Fetch the string value of registry leaf key `leafKeyName` under `hKey`
 * into a newly allocated UTF-8 string at *outptr.
 *
 * Returns ARES_FALSE and nullifies *outptr on failure; returns ARES_TRUE
 * and sets *outptr (caller frees with ares_free) on success. */
static ares_bool_t get_REG_SZ(HKEY hKey, const WCHAR *leafKeyName, char **outptr)
{
  DWORD  size = 0;
  int    res;
  int    len;
  WCHAR *val = NULL;

  *outptr = NULL;

  /* Find out size of string stored in registry */
  res = RegQueryValueExW(hKey, leafKeyName, 0, NULL, NULL, &size);
  if ((res != ERROR_SUCCESS && res != ERROR_MORE_DATA) || !size) {
    return ARES_FALSE;
  }

  /* Allocate buffer of indicated size plus one given that string
     might have been stored without null termination */
  val = ares_malloc_zero(size + sizeof(WCHAR));
  if (val == NULL) {
    return ARES_FALSE;
  }

  /* Get the value for real */
  res = RegQueryValueExW(hKey, leafKeyName, 0, NULL, (BYTE *)val, &size);
  if (res != ERROR_SUCCESS || size == 1) {
    ares_free(val);
    return ARES_FALSE;
  }

  /* Convert to UTF8 */
  len = WideCharToMultiByte(CP_UTF8, 0, val, -1, NULL, 0, NULL, NULL);
  if (len == 0) {
    /* FIX: val was leaked on this path */
    ares_free(val);
    return ARES_FALSE;
  }

  *outptr = ares_malloc_zero((size_t)len + 1);
  if (*outptr == NULL) {
    /* FIX: allocation result was previously unchecked */
    ares_free(val);
    return ARES_FALSE;
  }

  if (WideCharToMultiByte(CP_UTF8, 0, val, -1, *outptr, len, NULL, NULL)
      == 0) {
    ares_free(val);
    ares_free(*outptr);
    *outptr = NULL;
    return ARES_FALSE;
  }

  /* FIX: val was leaked on the success path */
  ares_free(val);
  return ARES_TRUE;
}
/* Append the first 'len' bytes of 'src' to the comma separated string at
 * *dst, growing the buffer as needed.  On allocation failure *dst is left
 * exactly as it was.  Callers pass len == strlen(src). */
static void commanjoin(char **dst, const char * const src, const size_t len)
{
  size_t pos;
  size_t need;
  char *buf;
  /* Current length of the accumulated string (0 when none yet). */
  pos = (*dst != NULL) ? ares_strlen(*dst) : 0;
  /* Existing text + ',' + chunk + NUL when appending; chunk + NUL when
   * starting fresh. */
  need = (*dst != NULL) ? (pos + len + 2) : (len + 1);
  buf = ares_realloc(*dst, need);
  if (buf == NULL) {
    return;
  }
  if (pos != 0) {
    buf[pos++] = ',';
  }
  memcpy(buf + pos, src, len);
  buf[pos + len] = '\0';
  *dst = buf;
}
/*
 * commajoin()
 *
 * Appends the whole of 'src' to the comma separated list in *dst.
 */
static void commajoin(char **dst, const char *src)
{
  commanjoin(dst, src, ares_strlen(src));
}
static void commajoin_asciionly(char **dst, const char *src)
{
if (!ares_str_isprint(src, ares_strlen(src))) {
return;
}
commanjoin(dst, src, ares_strlen(src));
}
/* A structure to hold the string form of IPv4 and IPv6 addresses so we can
 * sort them by a metric.
 */
typedef struct {
  /* The metric we sort them by (lower is more preferred). */
  ULONG metric;
  /* Original index of the item, used as a secondary sort parameter to make
   * qsort() stable if the metrics are equal */
  size_t orig_idx;
  /* Room enough for the string form of any IPv4 or IPv6 address that
   * ares_inet_ntop() will create. Based on the existing c-ares practice.
   */
  char text[INET6_ADDRSTRLEN + 8 + 64]; /* [%s]:NNNNN%iface */
} Address;
/* qsort() comparator for Address values: orders by ascending metric, then
 * by ascending original insertion index so that the sort is stable.
 */
static int compareAddresses(const void *arg1, const void *arg2)
{
  const Address * const a = arg1;
  const Address * const b = arg2;
  /* Primary key: metric -- lower means more preferred. */
  if (a->metric != b->metric) {
    return (a->metric < b->metric) ? -1 : 1;
  }
  /* Tie breaker: the earlier-inserted entry wins. */
  if (a->orig_idx != b->orig_idx) {
    return (a->orig_idx < b->orig_idx) ? -1 : 1;
  }
  return 0;
}
#if defined(HAVE_GETBESTROUTE2) && !defined(__WATCOMC__)
/* There can be multiple routes to "the Internet". And there can be different
* DNS servers associated with each of the interfaces that offer those routes.
* We have to assume that any DNS server can serve any request. But, some DNS
* servers may only respond if requested over their associated interface. But
* we also want to use "the preferred route to the Internet" whenever possible
* (and not use DNS servers on a non-preferred route even by forcing request
* to go out on the associated non-preferred interface). i.e. We want to use
* the DNS servers associated with the same interface that we would use to
* make a general request to anything else.
*
* But, Windows won't sort the DNS servers by the metrics associated with the
* routes and interfaces _even_ though it obviously sends IP packets based on
* those same routes and metrics. So, we must do it ourselves.
*
* So, we sort the DNS servers by the same metric values used to determine how
* an outgoing IP packet will go, thus effectively using the DNS servers
* associated with the interface that the DNS requests themselves will
* travel. This gives us optimal routing and avoids issues where DNS servers
* won't respond to requests that don't arrive via some specific subnetwork
* (and thus some specific interface).
*
* This function computes the metric we use to sort. On the interface
* identified by \a luid, it determines the best route to \a dest and combines
* that route's metric with \a interfaceMetric to compute a metric for the
* destination address on that interface. This metric can be used as a weight
* to sort the DNS server addresses associated with each interface (lower is
* better).
*
* Note that by restricting the route search to the specific interface with
* which the DNS servers are associated, this function asks the question "What
* is the metric for sending IP packets to this DNS server?" which allows us
* to sort the DNS servers correctly.
*/
/* Returns the combined route+interface metric for reaching 'dest' via the
 * interface identified by 'luid', or (ULONG)-1 (worst possible) on failure
 * or overflow. */
static ULONG getBestRouteMetric(IF_LUID * const luid, /* Can't be const :( */
                                const SOCKADDR_INET * const dest,
                                const ULONG interfaceMetric)
{
  MIB_IPFORWARD_ROW2 row;
  SOCKADDR_INET ignored;
  if (GetBestRoute2(/* The interface to use. The index is ignored since we are
                     * passing a LUID.
                     */
                    luid, 0,
                    /* No specific source address. */
                    NULL,
                    /* Our destination address. */
                    dest,
                    /* No options. */
                    0,
                    /* The route row. */
                    &row,
                    /* The best source address, which we don't need. */
                    &ignored) != NO_ERROR
      /* If the metric is "unused" (-1) or too large for us to add the two
       * metrics, use the worst possible, thus sorting this last.
       */
      || row.Metric == (ULONG)-1 ||
      row.Metric > ((ULONG)-1) - interfaceMetric) {
    /* Return the worst possible metric. */
    return (ULONG)-1;
  }
  /* Return the metric value from that row, plus the interface metric.
   *
   * See
   * http://msdn.microsoft.com/en-us/library/windows/desktop/aa814494(v=vs.85).aspx
   * which describes the combination as a "sum".
   */
  return row.Metric + interfaceMetric;
}
#endif
/*
 * get_DNS_Windows()
 *
 * Locates DNS info using GetAdaptersAddresses() function from the Internet
 * Protocol Helper (IP Helper) API. When located, this returns a pointer
 * in *outptr to a newly allocated memory area holding a null-terminated
 * string with a space or comma separated list of DNS IP addresses.
 *
 * Returns 0 and nullifies *outptr upon inability to return DNSes string.
 *
 * Returns 1 and sets *outptr when returning a dynamically allocated string.
 *
 * Implementation supports Windows XP and newer.
 */
# define IPAA_INITIAL_BUF_SZ 15 * 1024
# define IPAA_MAX_TRIES 3
static ares_bool_t get_DNS_Windows(char **outptr)
{
  IP_ADAPTER_DNS_SERVER_ADDRESS *ipaDNSAddr;
  IP_ADAPTER_ADDRESSES *ipaa;
  IP_ADAPTER_ADDRESSES *newipaa;
  IP_ADAPTER_ADDRESSES *ipaaEntry;
  ULONG ReqBufsz = IPAA_INITIAL_BUF_SZ;
  ULONG Bufsz = IPAA_INITIAL_BUF_SZ;
  ULONG AddrFlags = 0;
  int trying = IPAA_MAX_TRIES;
  ULONG res;
  /* The capacity of addresses, in elements. */
  size_t addressesSize;
  /* The number of elements in addresses. */
  size_t addressesIndex = 0;
  /* The addresses we will sort. */
  Address *addresses;
  /* Overlaid views of the generic sockaddr so the IPv4/IPv6 branches below
   * can read family-specific fields without a cast at every use. */
  union {
    struct sockaddr *sa;
    struct sockaddr_in *sa4;
    struct sockaddr_in6 *sa6;
  } namesrvr;
  *outptr = NULL;
  ipaa = ares_malloc(Bufsz);
  if (!ipaa) {
    return ARES_FALSE;
  }
  /* Start with enough room for a few DNS server addresses and we'll grow it
   * as we encounter more.
   */
  addressesSize = 4;
  addresses = (Address *)ares_malloc(sizeof(Address) * addressesSize);
  if (addresses == NULL) {
    /* We need room for at least some addresses to function. */
    ares_free(ipaa);
    return ARES_FALSE;
  }
  /* Usually this call succeeds with initial buffer size */
  res = GetAdaptersAddresses(AF_UNSPEC, AddrFlags, NULL, ipaa, &ReqBufsz);
  if ((res != ERROR_BUFFER_OVERFLOW) && (res != ERROR_SUCCESS)) {
    goto done;
  }
  /* Grow the buffer to the size Windows reported it needs, retrying a
   * bounded number of times in case the adapter list keeps changing. */
  while ((res == ERROR_BUFFER_OVERFLOW) && (--trying)) {
    if (Bufsz < ReqBufsz) {
      newipaa = ares_realloc(ipaa, ReqBufsz);
      if (!newipaa) {
        goto done;
      }
      Bufsz = ReqBufsz;
      ipaa = newipaa;
    }
    res = GetAdaptersAddresses(AF_UNSPEC, AddrFlags, NULL, ipaa, &ReqBufsz);
    if (res == ERROR_SUCCESS) {
      break;
    }
  }
  if (res != ERROR_SUCCESS) {
    goto done;
  }
  for (ipaaEntry = ipaa; ipaaEntry; ipaaEntry = ipaaEntry->Next) {
    if (ipaaEntry->OperStatus != IfOperStatusUp) {
      continue;
    }
    /* For each interface, find any associated DNS servers as IPv4 or IPv6
     * addresses. For each found address, find the best route to that DNS
     * server address _on_ _that_ _interface_ (at this moment in time) and
     * compute the resulting total metric, just as Windows routing will do.
     * Then, sort all the addresses found by the metric.
     */
    for (ipaDNSAddr = ipaaEntry->FirstDnsServerAddress; ipaDNSAddr != NULL;
         ipaDNSAddr = ipaDNSAddr->Next) {
      char ipaddr[INET6_ADDRSTRLEN] = "";
      namesrvr.sa = ipaDNSAddr->Address.lpSockaddr;
      if (namesrvr.sa->sa_family == AF_INET) {
        /* Skip unusable IPv4 server addresses (0.0.0.0 / 255.255.255.255) */
        if ((namesrvr.sa4->sin_addr.S_un.S_addr == INADDR_ANY) ||
            (namesrvr.sa4->sin_addr.S_un.S_addr == INADDR_NONE)) {
          continue;
        }
        /* Allocate room for another address, if necessary, else skip. */
        if (addressesIndex == addressesSize) {
          const size_t newSize = addressesSize + 4;
          Address * const newMem =
            (Address *)ares_realloc(addresses, sizeof(Address) * newSize);
          if (newMem == NULL) {
            continue;
          }
          addresses = newMem;
          addressesSize = newSize;
        }
# if defined(HAVE_GETBESTROUTE2) && !defined(__WATCOMC__)
        /* OpenWatcom's builtin Windows SDK does not have a definition for
         * MIB_IPFORWARD_ROW2, and also does not allow the usage of SOCKADDR_INET
         * as a variable. Let's work around this by returning the worst possible
         * metric, but only when using the OpenWatcom compiler.
         * It may be worth investigating using a different version of the Windows
         * SDK with OpenWatcom in the future, though this may be fixed in OpenWatcom
         * 2.0.
         */
        addresses[addressesIndex].metric = getBestRouteMetric(
          &ipaaEntry->Luid, (SOCKADDR_INET *)((void *)(namesrvr.sa)),
          ipaaEntry->Ipv4Metric);
# else
        addresses[addressesIndex].metric = (ULONG)-1;
# endif
        /* Record insertion index to make qsort stable */
        addresses[addressesIndex].orig_idx = addressesIndex;
        if (!ares_inet_ntop(AF_INET, &namesrvr.sa4->sin_addr, ipaddr,
                            sizeof(ipaddr))) {
          continue;
        }
        snprintf(addresses[addressesIndex].text,
                 sizeof(addresses[addressesIndex].text), "[%s]:%u", ipaddr,
                 ntohs(namesrvr.sa4->sin_port));
        ++addressesIndex;
      } else if (namesrvr.sa->sa_family == AF_INET6) {
        unsigned int ll_scope = 0;
        struct ares_addr addr;
        /* Skip the IPv6 unspecified address (::) */
        if (memcmp(&namesrvr.sa6->sin6_addr, &ares_in6addr_any,
                   sizeof(namesrvr.sa6->sin6_addr)) == 0) {
          continue;
        }
        /* Allocate room for another address, if necessary, else skip. */
        if (addressesIndex == addressesSize) {
          const size_t newSize = addressesSize + 4;
          Address * const newMem =
            (Address *)ares_realloc(addresses, sizeof(Address) * newSize);
          if (newMem == NULL) {
            continue;
          }
          addresses = newMem;
          addressesSize = newSize;
        }
        /* See if its link-local */
        memset(&addr, 0, sizeof(addr));
        addr.family = AF_INET6;
        memcpy(&addr.addr.addr6, &namesrvr.sa6->sin6_addr, 16);
        if (ares_addr_is_linklocal(&addr)) {
          /* Link-local servers need the interface index as a scope id. */
          ll_scope = ipaaEntry->Ipv6IfIndex;
        }
# if defined(HAVE_GETBESTROUTE2) && !defined(__WATCOMC__)
        addresses[addressesIndex].metric = getBestRouteMetric(
          &ipaaEntry->Luid, (SOCKADDR_INET *)((void *)(namesrvr.sa)),
          ipaaEntry->Ipv6Metric);
# else
        addresses[addressesIndex].metric = (ULONG)-1;
# endif
        /* Record insertion index to make qsort stable */
        addresses[addressesIndex].orig_idx = addressesIndex;
        if (!ares_inet_ntop(AF_INET6, &namesrvr.sa6->sin6_addr, ipaddr,
                            sizeof(ipaddr))) {
          continue;
        }
        if (ll_scope) {
          snprintf(addresses[addressesIndex].text,
                   sizeof(addresses[addressesIndex].text), "[%s]:%u%%%u",
                   ipaddr, ntohs(namesrvr.sa6->sin6_port), ll_scope);
        } else {
          snprintf(addresses[addressesIndex].text,
                   sizeof(addresses[addressesIndex].text), "[%s]:%u", ipaddr,
                   ntohs(namesrvr.sa6->sin6_port));
        }
        ++addressesIndex;
      } else {
        /* Skip non-IPv4/IPv6 addresses completely. */
        continue;
      }
    }
  }
  /* Sort all of the textual addresses by their metric (and original index if
   * metrics are equal). */
  qsort(addresses, addressesIndex, sizeof(*addresses), compareAddresses);
  /* Join them all into a single string, removing duplicates. */
  {
    size_t i;
    for (i = 0; i < addressesIndex; ++i) {
      size_t j;
      /* Look for this address text appearing previously in the results. */
      for (j = 0; j < i; ++j) {
        if (strcmp(addresses[j].text, addresses[i].text) == 0) {
          break;
        }
      }
      /* Iff we didn't emit this address already, emit it now. */
      if (j == i) {
        /* Add that to outptr (if we can). */
        commajoin(outptr, addresses[i].text);
      }
    }
  }
done:
  ares_free(addresses);
  if (ipaa) {
    ares_free(ipaa);
  }
  /* Success only when at least one server address made it into the list. */
  if (!*outptr) {
    return ARES_FALSE;
  }
  return ARES_TRUE;
}
/*
 * get_SuffixList_Windows()
 *
 * Reads the "DNS Suffix Search List" from registry and writes the list items
 * whitespace separated to outptr. If the Search List is empty, the
 * "Primary Dns Suffix" is written to outptr.
 *
 * Returns 0 and nullifies *outptr upon inability to return the suffix list.
 *
 * Returns 1 and sets *outptr when returning a dynamically allocated string.
 *
 * Implementation supports Windows Server 2003 and newer
 */
static ares_bool_t get_SuffixList_Windows(char **outptr)
{
  HKEY hKey;
  HKEY hKeyEnum;
  char keyName[256];
  DWORD keyNameBuffSize;
  DWORD keyIdx = 0;
  char *p = NULL;
  *outptr = NULL;
  /* 1. Global DNS Suffix Search List */
  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, WIN_NS_NT_KEY, 0, KEY_READ, &hKey) ==
      ERROR_SUCCESS) {
    /* SearchList seeds *outptr directly; Domain is appended only when it is
     * entirely printable. */
    get_REG_SZ(hKey, SEARCHLIST_KEY, outptr);
    if (get_REG_SZ(hKey, DOMAIN_KEY, &p)) {
      commajoin_asciionly(outptr, p);
      ares_free(p);
      p = NULL;
    }
    RegCloseKey(hKey);
  }
  /* Group-policy SearchList overrides/extends the global one when set. */
  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, WIN_NT_DNSCLIENT, 0, KEY_READ, &hKey) ==
      ERROR_SUCCESS) {
    if (get_REG_SZ(hKey, SEARCHLIST_KEY, &p)) {
      commajoin_asciionly(outptr, p);
      ares_free(p);
      p = NULL;
    }
    RegCloseKey(hKey);
  }
  /* 2. Connection Specific Search List composed of:
   *  a. Primary DNS Suffix */
  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, WIN_DNSCLIENT, 0, KEY_READ, &hKey) ==
      ERROR_SUCCESS) {
    if (get_REG_SZ(hKey, PRIMARYDNSSUFFIX_KEY, &p)) {
      commajoin_asciionly(outptr, p);
      ares_free(p);
      p = NULL;
    }
    RegCloseKey(hKey);
  }
  /* b. Interface SearchList, Domain, DhcpDomain */
  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, WIN_NS_NT_KEY "\\" INTERFACES_KEY, 0,
                    KEY_READ, &hKey) == ERROR_SUCCESS) {
    /* Enumerate every per-interface subkey until RegEnumKeyExA runs out. */
    for (;;) {
      keyNameBuffSize = sizeof(keyName);
      if (RegEnumKeyExA(hKey, keyIdx++, keyName, &keyNameBuffSize, 0, NULL,
                        NULL, NULL) != ERROR_SUCCESS) {
        break;
      }
      if (RegOpenKeyExA(hKey, keyName, 0, KEY_QUERY_VALUE, &hKeyEnum) !=
          ERROR_SUCCESS) {
        continue;
      }
      /* p can be comma separated (SearchList) */
      if (get_REG_SZ(hKeyEnum, SEARCHLIST_KEY, &p)) {
        commajoin_asciionly(outptr, p);
        ares_free(p);
        p = NULL;
      }
      if (get_REG_SZ(hKeyEnum, DOMAIN_KEY, &p)) {
        commajoin_asciionly(outptr, p);
        ares_free(p);
        p = NULL;
      }
      if (get_REG_SZ(hKeyEnum, DHCPDOMAIN_KEY, &p)) {
        commajoin_asciionly(outptr, p);
        ares_free(p);
        p = NULL;
      }
      RegCloseKey(hKeyEnum);
    }
    RegCloseKey(hKey);
  }
  return *outptr != NULL ? ARES_TRUE : ARES_FALSE;
}
/* Populate 'sysconfig' from the Windows system configuration: DNS server
 * addresses from the IP Helper API and the domain suffix search list from
 * the registry.  Either source failing to yield data is not an error. */
ares_status_t ares_init_sysconfig_windows(const ares_channel_t *channel,
                                          ares_sysconfig_t *sysconfig)
{
  char *buf = NULL;
  ares_status_t status = ARES_SUCCESS;
  /* DNS servers, parsed into server configuration entries. */
  if (get_DNS_Windows(&buf)) {
    status = ares_sconfig_append_fromstr(channel, &sysconfig->sconfig, buf,
                                         ARES_TRUE);
    ares_free(buf);
    if (status != ARES_SUCCESS) {
      return status;
    }
  }
  /* Domain suffix search list, split on ", " into individual domains. */
  if (get_SuffixList_Windows(&buf)) {
    sysconfig->domains = ares_strsplit(buf, ", ", &sysconfig->ndomains);
    ares_free(buf);
    if (sysconfig->domains == NULL) {
      return ARES_EFILE;
    }
  }
  return status;
}
#endif
|
c
|
github
|
https://github.com/nodejs/node
|
deps/cares/src/lib/ares_sysconfig_win.c
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin
# TODO(wickman)
#
# 1. open(foo) should always be done in a with context.
#
# 2. if you see acquire/release on the same variable in a particular ast
# body, warn about context manager use.
class MissingContextManager(CheckstylePlugin):
  """Recommend the use of contextmanagers when it seems appropriate."""

  def nits(self):
    # open() calls that already appear as the context expression of a
    # `with` statement are fine; collect them so they can be excluded.
    managed_calls = {
        node.context_expr
        for node in self.iter_ast_types(ast.With)
        if isinstance(node.context_expr, ast.Call)
    }
    for call in self.iter_ast_types(ast.Call):
      is_bare_open = isinstance(call.func, ast.Name) and call.func.id == 'open'
      if is_bare_open and call not in managed_calls:
        yield self.warning('T802', 'open() calls should be made within a contextmanager.', call)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
import random
import signal
import sys
from buildscripts.resmokelib.testing.hooks.bghook import BGHook
class PeriodicStackTrace(BGHook):
    """Test hook that sends the stacktracing signal to mongo processes at randomized intervals."""

    DESCRIPTION = "Sends the stacktracing signal to mongo processes at randomized intervals."

    IS_BACKGROUND = True

    def __init__(self, hook_logger, fixture, frequency=1 / 60):
        """Initialize the hook with a once-per-second background loop."""
        BGHook.__init__(self, hook_logger, fixture, self.DESCRIPTION, loop_delay_ms=1000)
        self._fixture = fixture
        self._frequency = frequency

    def _signal_probability_per_iteration(self):
        """Return the chance that a single loop iteration sends the signal."""
        return self._frequency

    def run_action(self):
        """On a random subset of iterations, signal one running mongo process."""
        if sys.platform == "win32":
            # SIGUSR2 is not usable on Windows, so the hook is a no-op there.
            return
        if random.random() > self._signal_probability_per_iteration():
            return
        pids = self._fixture.pids()
        if not pids:
            return
        pid = random.choice(pids)
        self.logger.info(f"Requesting stacktrace from process {pid}")
        os.kill(pid, signal.SIGUSR2)
|
python
|
github
|
https://github.com/mongodb/mongo
|
buildscripts/resmokelib/testing/hooks/periodic_stack_trace.py
|
"""
Configuration for the ``student`` Django application.
"""
from __future__ import absolute_import
import os
from django.apps import AppConfig
from django.contrib.auth.signals import user_logged_in
from django.db.models.signals import pre_save
class StudentConfig(AppConfig):
    """
    Default configuration for the ``student`` application.
    """
    name = 'student'
    def ready(self):
        """
        Wire up signal receivers once the Django app registry is ready.
        """
        # Replace Django's built-in last-login bookkeeping with this
        # project's own receiver: disconnect first, then connect ours.
        from django.contrib.auth.models import update_last_login as django_update_last_login
        user_logged_in.disconnect(django_update_last_login)
        from .signals.receivers import update_last_login
        user_logged_in.connect(update_last_login)
        # Run project-specific logic before any User row is saved.
        from django.contrib.auth.models import User
        from .signals.receivers import on_user_updated
        pre_save.connect(on_user_updated, sender=User)
        # The django-simple-history model on CourseEnrollment creates performance
        # problems in testing, we mock it here so that the mock impacts all tests.
        if os.environ.get('DISABLE_COURSEENROLLMENT_HISTORY', False):
            import student.models as student_models
            from mock import MagicMock
            student_models.CourseEnrollment.history = MagicMock()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""jsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
import blog.views
import misc.urls
from sitemap.sitemaps import BlogPostSitemap
# Section name -> Sitemap class, consumed by the sitemap view below.
sitemaps = {
    'blog': BlogPostSitemap,
}
urlpatterns = [
    # Site root renders the full blog post list.
    url(r'^$', blog.views.post_all),
    url(r'^blog/', include('blog.urls', namespace='blog')),
    url(r'^lab/', include('lab.urls', namespace='lab')),
    url(
        r'^sitemap\.xml$',
        sitemap, {'sitemaps': sitemaps},
        name='django.contrib.sitemaps.views.sitemap'
    ),
    url(r'^admin/', include(admin.site.urls)),
]
# Miscellaneous routes are appended rather than included under a prefix.
urlpatterns.extend(misc.urls.urlpatterns)
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
title: Special elements
---
|
unknown
|
github
|
https://github.com/sveltejs/svelte
|
documentation/docs/05-special-elements/index.md
|
# stdlib
from socket import socket
import unittest
import xmlrpclib
# 3p
from mock import patch
# project
from checks import AgentCheck
from tests.checks.common import get_check
class TestSupervisordCheck(unittest.TestCase):
    """Tests for the supervisord check, run against a mocked XML-RPC server.

    Each TEST_CASES entry bundles the YAML fed to the check, the instance
    dicts the YAML should parse into, and either the expected metrics and
    service checks per server or the error message expected on failure.
    """
    TEST_CASES = [{
        'yaml': """
init_config:
instances:
  - name: server1
    host: localhost
    port: 9001""",
        'expected_instances': [{
            'host': 'localhost',
            'name': 'server1',
            'port': 9001
        }],
        'expected_metrics': {
            'server1': [
                ('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:up']}),
                ('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:down']}),
                ('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:unknown']}),
                ('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:python']}),
                ('supervisord.process.uptime', 125, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:mysql']}),
                ('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:java']})
            ]
        },
        'expected_service_checks': {
            'server1': [{
                'status': AgentCheck.OK,
                'tags': ['supervisord_server:server1'],
                'check': 'supervisord.can_connect',
            }, {
                'status': AgentCheck.OK,
                'tags': ['supervisord_server:server1', 'supervisord_process:mysql'],
                'check': 'supervisord.process.status'
            }, {
                'status': AgentCheck.CRITICAL,
                'tags': ['supervisord_server:server1', 'supervisord_process:java'],
                'check': 'supervisord.process.status'
            }, {
                'status': AgentCheck.UNKNOWN,
                'tags': ['supervisord_server:server1', 'supervisord_process:python'],
                'check': 'supervisord.process.status'
            }]
        }
    }, {
        'yaml': """
init_config:
instances:
  - name: server0
    host: localhost
    port: 9001
    user: user
    pass: pass
    proc_names:
      - apache2
      - webapp
  - name: server1
    host: 10.60.130.82""",
        'expected_instances': [{
            'name': 'server0',
            'host': 'localhost',
            'port': 9001,
            'user': 'user',
            'pass': 'pass',
            'proc_names': ['apache2', 'webapp'],
        }, {
            'host': '10.60.130.82',
            'name': 'server1'
        }],
        'expected_metrics': {
            'server0': [
                ('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
                ('supervisord.process.count', 2, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
                ('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
                ('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:apache2']}),
                ('supervisord.process.uptime', 2, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:webapp']}),
            ],
            'server1': [
                ('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:up']}),
                ('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:down']}),
                ('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:unknown']}),
                ('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:ruby']})
            ]
        },
        'expected_service_checks': {
            'server0': [{
                'status': AgentCheck.OK,
                'tags': ['supervisord_server:server0'],
                'check': 'supervisord.can_connect',
            }, {
                'status': AgentCheck.CRITICAL,
                'tags': ['supervisord_server:server0', 'supervisord_process:apache2'],
                'check': 'supervisord.process.status'
            }, {
                'status': AgentCheck.CRITICAL,
                'tags': ['supervisord_server:server0', 'supervisord_process:webapp'],
                'check': 'supervisord.process.status'
            }],
            'server1': [{
                'status': AgentCheck.OK,
                'tags': ['supervisord_server:server1'],
                'check': 'supervisord.can_connect',
            }, {
                'status': AgentCheck.CRITICAL,
                'tags': ['supervisord_server:server1', 'supervisord_process:ruby'],
                'check': 'supervisord.process.status'
            }]
        }
    }, {
        'yaml': """
init_config:
instances:
  - name: server0
    host: invalid_host
    port: 9009""",
        'expected_instances': [{
            'name': 'server0',
            'host': 'invalid_host',
            'port': 9009
        }],
        'error_message': """Cannot connect to http://invalid_host:9009. Make sure supervisor is running and XML-RPC inet interface is enabled."""
    }, {
        'yaml': """
init_config:
instances:
  - name: server0
    host: localhost
    port: 9010
    user: invalid_user
    pass: invalid_pass""",
        'expected_instances': [{
            'name': 'server0',
            'host': 'localhost',
            'port': 9010,
            'user': 'invalid_user',
            'pass': 'invalid_pass'
        }],
        'error_message': """Username or password to server0 are incorrect."""
    }, {
        'yaml': """
init_config:
instances:
  - name: server0
    host: localhost
    port: 9001
    proc_names:
      - mysql
      - invalid_process""",
        'expected_instances': [{
            'name': 'server0',
            'host': 'localhost',
            'port': 9001,
            'proc_names': ['mysql', 'invalid_process']
        }],
        'expected_metrics': {
            'server0': [
                ('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
                ('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
                ('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
                ('supervisord.process.uptime', 125, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:mysql']})
            ]
        },
        'expected_service_checks': {
            'server0': [{
                'status': AgentCheck.OK,
                'tags': ['supervisord_server:server0'],
                'check': 'supervisord.can_connect',
            }, {
                'status': AgentCheck.OK,
                'tags': ['supervisord_server:server0', 'supervisord_process:mysql'],
                'check': 'supervisord.process.status'
            }]
        }
    }, {
        'yaml': """
init_config:
instances:
  - name: server0
    host: localhost
    port: 9001
    proc_regex:
      - '^mysq.$'
      - invalid_process""",
        'expected_instances': [{
            'name': 'server0',
            'host': 'localhost',
            'port': 9001,
            'proc_regex': ['^mysq.$', 'invalid_process']
        }],
        'expected_metrics': {
            'server0': [
                ('supervisord.process.count', 1,
                 {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
                ('supervisord.process.count', 0,
                 {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
                ('supervisord.process.count', 0,
                 {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
                ('supervisord.process.uptime', 125, {'type': 'gauge',
                                                     'tags': ['supervisord_server:server0',
                                                              'supervisord_process:mysql']})
            ]
        },
        'expected_service_checks': {
            'server0': [{
                'status': AgentCheck.OK,
                'tags': ['supervisord_server:server0'],
                'check': 'supervisord.can_connect',
            }, {
                'status': AgentCheck.OK,
                'tags': ['supervisord_server:server0', 'supervisord_process:mysql'],
                'check': 'supervisord.process.status'
            }]
        }
    }]
    def setUp(self):
        # Route every xmlrpclib.Server(...) call to the in-process mock.
        self.patcher = patch('xmlrpclib.Server', self.mock_server)
        self.patcher.start()
    def tearDown(self):
        self.patcher.stop()
    # Integration Test #####################################################
    def test_check(self):
        """Integration test for supervisord check. Using a mocked supervisord."""
        for tc in self.TEST_CASES:
            check, instances = get_check('supervisord', tc['yaml'])
            self.assertTrue(check is not None, msg=check)
            self.assertEquals(tc['expected_instances'], instances)
            for instance in instances:
                name = instance['name']
                try:
                    # Run the check
                    check.check(instance)
                except Exception, e:
                    if 'error_message' in tc:  # excepted error
                        self.assertEquals(str(e), tc['error_message'])
                    else:
                        self.assertTrue(False, msg=str(e))
                else:
                    # Assert that the check collected the right metrics
                    expected_metrics = tc['expected_metrics'][name]
                    self.assert_metrics(expected_metrics, check.get_metrics())
                    # Assert that the check generated the right service checks
                    expected_service_checks = tc['expected_service_checks'][name]
                    self.assert_service_checks(expected_service_checks,
                                               check.get_service_checks())
    # Unit Tests ###########################################################
    def test_build_message(self):
        """Unit test supervisord build service check message."""
        process = {
            'now': 1414815513,
            'group': 'mysql',
            'description': 'pid 787, uptime 0:02:05',
            'pid': 787,
            'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
            'stop': 0,
            'statename': 'RUNNING',
            'start': 1414815388,
            'state': 20,
            'stdout_logfile': '/var/log/mysql/mysql.log',
            'logfile': '/var/log/mysql/mysql.log',
            'exitstatus': 0,
            'spawnerr': '',
            'name': 'mysql'
        }
        expected_message = """Current time: 2014-11-01 04:18:33
Process name: mysql
Process group: mysql
Description: pid 787, uptime 0:02:05
Error log file: /var/log/supervisor/mysql-stderr---supervisor-3ATI82.log
Stdout log file: /var/log/mysql/mysql.log
Log file: /var/log/mysql/mysql.log
State: RUNNING
Start time: 2014-11-01 04:16:28
Stop time: \nExit Status: 0"""
        check, _ = get_check('supervisord', self.TEST_CASES[0]['yaml'])
        self.assertEquals(expected_message, check._build_message(process))
    # Helper Methods #######################################################
    @staticmethod
    def mock_server(url):
        # Stand-in factory used by the xmlrpclib.Server patch in setUp().
        return MockXmlRcpServer(url)
    def assert_metrics(self, expected, actual):
        actual = [TestSupervisordCheck.norm_metric(metric) for metric in actual]
        self.assertEquals(len(actual), len(expected), msg='Invalid # metrics reported.\n'
                          'Expected: {0}. Found: {1}'.format(len(expected), len(actual)))
        self.assertTrue(all([expected_metric in actual for expected_metric in expected]),
                        msg='Reported metrics are incorrect.\nExpected: {0}.\n'
                            'Found: {1}'.format(expected, actual))
    def assert_service_checks(self, expected, actual):
        actual = [TestSupervisordCheck.norm_service_check(service_check)
                  for service_check in actual]
        self.assertEquals(len(actual), len(expected), msg='Invalid # service checks reported.'
                          '\nExpected: {0}. Found: {1}.'.format(expected, actual))
        self.assertTrue(all([expected_service_check in actual
                             for expected_service_check in expected]),
                        msg='Reported service checks are incorrect.\nExpected:{0}\n'
                            'Found:{1}'.format(expected, actual))
    @staticmethod
    def norm_metric(metric):
        '''Removes hostname and timestamp'''
        metric[3].pop('hostname')
        return (metric[0], metric[2], metric[3])
    @staticmethod
    def norm_service_check(service_check):
        '''Removes timestamp, host_name, message and id'''
        for field in ['timestamp', 'host_name', 'message', 'id']:
            service_check.pop(field)
        return service_check
class MockXmlRcpServer:
    """Class that mocks an XML RPC server. Initialized using a mocked
    supervisord server url, which is used to initialize the supervisord
    server.
    """
    def __init__(self, url):
        # Mirrors the real client's `.supervisor` namespace attribute.
        self.supervisor = MockSupervisor(url)
class MockSupervisor:
"""Class that mocks a supervisord sever. Initialized using the server url
and mocks process methods providing mocked process information for testing
purposes.
"""
MOCK_PROCESSES = {
'http://localhost:9001/RPC2': [{
'now': 1414815513,
'group': 'mysql',
'description': 'pid 787, uptime 0:02:05',
'pid': 787,
'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
'stop': 0,
'statename': 'RUNNING',
'start': 1414815388,
'state': 20,
'stdout_logfile': '/var/log/mysql/mysql.log',
'logfile': '/var/log/mysql/mysql.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'mysql'
}, {
'now': 1414815738,
'group': 'java',
'description': 'Nov 01 04:22 AM',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/java-stderr---supervisor-lSdcKZ.log',
'stop': 1414815722,
'statename': 'STOPPED',
'start': 1414815388,
'state': 0,
'stdout_logfile': '/var/log/java/java.log',
'logfile': '/var/log/java/java.log',
'exitstatus': 21,
'spawnerr': '',
'name': 'java'
}, {
'now': 1414815738,
'group': 'python',
'description': '',
'pid': 2765,
'stderr_logfile': '/var/log/supervisor/python-stderr---supervisor-vFzxIg.log',
'stop': 1414815737,
'statename': 'STARTING',
'start': 1414815737,
'state': 10,
'stdout_logfile': '/var/log/python/python.log',
'logfile': '/var/log/python/python.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'python'
}],
'http://user:pass@localhost:9001/RPC2': [{
'now': 1414869824,
'group': 'apache2',
'description': 'Exited too quickly (process log may have details)',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/apache2-stderr---supervisor-0PkXWd.log',
'stop': 1414867047,
'statename': 'FATAL',
'start': 1414867047,
'state': 200,
'stdout_logfile': '/var/log/apache2/apache2.log',
'logfile': '/var/log/apache2/apache2.log',
'exitstatus': 0,
'spawnerr': 'Exited too quickly (process log may have details)',
'name': 'apache2'
}, {
'now': 1414871104,
'group': 'webapp',
'description': '',
'pid': 17600,
'stderr_logfile': '/var/log/supervisor/webapp-stderr---supervisor-onZK__.log',
'stop': 1414871101,
'statename': 'STOPPING',
'start': 1414871102,
'state': 40,
'stdout_logfile': '/var/log/company/webapp.log',
'logfile': '/var/log/company/webapp.log',
'exitstatus': 1,
'spawnerr': '',
'name': 'webapp'
}],
'http://10.60.130.82:9001/RPC2': [{
'now': 1414871588,
'group': 'ruby',
'description': 'Exited too quickly (process log may have details)',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/ruby-stderr---supervisor-BU7Wat.log',
'stop': 1414871588,
'statename': 'BACKOFF',
'start': 1414871588,
'state': 30,
'stdout_logfile': '/var/log/ruby/ruby.log',
'logfile': '/var/log/ruby/ruby.log',
'exitstatus': 0,
'spawnerr': 'Exited too quickly (process log may have details)',
'name': 'ruby'
}]
}
def __init__(self, url):
    """Remember the XML-RPC endpoint *url* used to select the mock data set."""
    self.url = url
def getAllProcessInfo(self):
    """Return every mocked process-info dict registered for this URL.

    Mirrors supervisord's ``getAllProcessInfo`` XML-RPC call: the
    simulated request validation runs first (and may raise connection
    or authorization errors), then the full process list is returned.
    """
    self._validate_request()
    mock_data = self.MOCK_PROCESSES
    return mock_data[self.url]
def getProcessInfo(self, proc_name):
    """Return the mocked process dict whose ``name`` equals *proc_name*.

    Mirrors supervisord's ``getProcessInfo`` XML-RPC call. Raises
    ``Exception`` when no process with that name exists for this URL.
    """
    self._validate_request(proc=proc_name)
    found = next(
        (info for info in self.MOCK_PROCESSES[self.url]
         if info['name'] == proc_name),
        None,
    )
    if found is None:
        raise Exception('Process not found: %s' % proc_name)
    return found
def _validate_request(self, proc=None):
    """Validates request and simulates errors when not valid.

    The branch checked depends on markers embedded in ``self.url`` (and
    optionally in *proc*); at most one simulated failure is raised per
    call because of the ``elif`` chain.

    :param proc: optional process name; names containing ``'invalid'``
        trigger a simulated xmlrpc ``BAD_NAME`` fault.
    """
    if 'invalid_host' in self.url:
        # Simulate connecting to an invalid host/port in order to
        # raise `socket.error: [Errno 111] Connection refused`
        # NOTE(review): assumes `socket` is the socket class brought in at
        # module level (e.g. ``from socket import socket``) — confirm.
        socket().connect(('localhost', 38837))
    elif 'invalid_pass' in self.url:
        # Simulate xmlrpc exception for invalid credentials.
        # NOTE(review): `xmlrpclib` is the Python 2 module name; presumably
        # imported (or aliased) at module level — confirm.
        # self.url[7:] strips the leading 'http://' for the error's host field.
        raise xmlrpclib.ProtocolError(self.url[7:], 401,
                                      'Unauthorized', None)
    elif proc is not None and 'invalid' in proc:
        # Simulate xmlrpc exception for process not found
        raise xmlrpclib.Fault(10, 'BAD_NAME')
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil

import (
	"fmt"
	"os"
)

// MmapFile pairs an open file with its memory-mapped contents.
type MmapFile struct {
	f *os.File // underlying file handle; kept open for the lifetime of the mapping
	b []byte   // memory-mapped bytes backed by f
}
// OpenMmapFile opens path read-only and memory-maps its entire current
// size (size 0 means "use the file's size" in OpenMmapFileWithSize).
func OpenMmapFile(path string) (*MmapFile, error) {
	return OpenMmapFileWithSize(path, 0)
}
// OpenMmapFileWithSize opens path read-only and memory-maps size bytes
// of it. If size <= 0 the file's current size is used instead. On any
// failure after the open, the file handle is closed before returning,
// so no descriptor is leaked.
func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) {
	f, err := os.Open(path)
	if err != nil {
		// Fixed: this wrapped the plain open failure as "try lock file",
		// which is misleading — no locking happens here.
		return nil, fmt.Errorf("open file: %w", err)
	}
	// Close f if stat or mmap below fails; retErr is the named return.
	defer func() {
		if retErr != nil {
			f.Close()
		}
	}()
	if size <= 0 {
		info, err := f.Stat()
		if err != nil {
			return nil, fmt.Errorf("stat: %w", err)
		}
		size = int(info.Size())
	}
	b, err := mmap(f, size)
	if err != nil {
		return nil, fmt.Errorf("mmap, size %d: %w", size, err)
	}
	return &MmapFile{f: f, b: b}, nil
}
// Close unmaps the mapped memory and closes the underlying file.
// Both operations are always attempted; if both fail, the unmap
// error takes precedence.
func (f *MmapFile) Close() error {
	unmapErr := munmap(f.b)
	closeErr := f.f.Close()
	if unmapErr != nil {
		return unmapErr
	}
	return closeErr
}
// File returns the underlying *os.File backing the mapping.
func (f *MmapFile) File() *os.File {
	return f.f
}
// Bytes returns the memory-mapped contents. The slice is only valid
// until Close is called, which unmaps it.
func (f *MmapFile) Bytes() []byte {
	return f.b
}
|
go
|
github
|
https://github.com/prometheus/prometheus
|
tsdb/fileutil/mmap.go
|
no_wheels:
name: boeing-with-no-wheels
manufactured_at: 2024-01-01
no_manufactured_at:
name: boeing-with-no-manufactured-at
wheels_count: 2
|
unknown
|
github
|
https://github.com/rails/rails
|
activerecord/test/fixtures/aircrafts.yml
|
import PropTypes from "prop-types";
const ErrorMessage = ({ message }) => (
<aside>
{message}
<style jsx>{`
aside {
padding: 1.5em;
font-size: 14px;
color: white;
background-color: red;
}
`}</style>
</aside>
);
ErrorMessage.propTypes = {
message: PropTypes.string.isRequired,
};
export default ErrorMessage;
|
javascript
|
github
|
https://github.com/vercel/next.js
|
examples/with-apollo-and-redux/components/ErrorMessage.js
|
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of contents**
- [Perform review of security issues that are marked for the release](#perform-review-of-security-issues-that-are-marked-for-the-release)
- [Prepare the Apache Airflow Helm Chart Release Candidate](#prepare-the-apache-airflow-helm-chart-release-candidate)
- [Pre-requisites](#pre-requisites)
- [Set environment variables](#set-environment-variables)
- [Setup k8s environment (mainly helm chart)](#setup-k8s-environment-mainly-helm-chart)
- [Build Release Notes](#build-release-notes)
- [Update minimum version of Kubernetes](#update-minimum-version-of-kubernetes)
- [Build RC artifacts](#build-rc-artifacts)
- [Publish rc documentation](#publish-rc-documentation)
- [Prepare issue for testing status of rc](#prepare-issue-for-testing-status-of-rc)
- [Prepare Vote email on the Apache Airflow release candidate](#prepare-vote-email-on-the-apache-airflow-release-candidate)
- [Verify the release candidate by PMC members](#verify-the-release-candidate-by-pmc-members)
- [SVN check](#svn-check)
- [Source tarball reproducibility check](#source-tarball-reproducibility-check)
- [Licence check](#licence-check)
- [Signature check](#signature-check)
- [SHA512 sum check](#sha512-sum-check)
- [Verify release candidates by Contributors](#verify-release-candidates-by-contributors)
- [Publish the final release](#publish-the-final-release)
- [Summarize the voting for the release](#summarize-the-voting-for-the-release)
- [Publish release to SVN](#publish-release-to-svn)
- [Publish release tag](#publish-release-tag)
- [Publish final documentation](#publish-final-documentation)
- [Update `index.yaml` in airflow-site](#update-indexyaml-in-airflow-site)
- [Wait for ArtifactHUB to discover new release](#wait-for-artifacthub-to-discover-new-release)
- [Notify developers of release](#notify-developers-of-release)
- [Send announcements about security issues fixed in the release](#send-announcements-about-security-issues-fixed-in-the-release)
- [Add release data to Apache Committee Report Helper](#add-release-data-to-apache-committee-report-helper)
- [Update Announcements page](#update-announcements-page)
- [Create release on GitHub](#create-release-on-github)
- [Close the milestone](#close-the-milestone)
- [Close the testing status issue](#close-the-testing-status-issue)
- [Update issue template with the new release](#update-issue-template-with-the-new-release)
- [Announce the release on the community slack](#announce-the-release-on-the-community-slack)
- [Announce about the release in social media](#announce-about-the-release-in-social-media)
- [Bump chart version in Chart.yaml](#bump-chart-version-in-chartyaml)
- [Remove old releases](#remove-old-releases)
- [Additional processes](#additional-processes)
- [Fixing released documentation](#fixing-released-documentation)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
You can find the prerequisites to release Apache Airflow in [README.md](README.md). This document
details the steps for releasing Helm Chart.
# Perform review of security issues that are marked for the release
We are keeping track of security issues in the [Security Issues](https://github.com/airflow-s/airflow-s/issues)
repository currently. As a release manager, you should have access to the repository.
Please review and ensure that all security issues marked for the release have been
addressed and resolved. Ping security team (comment in the issues) if anything missing or
the issue does not seem to be addressed.
Additionally, the [dependabot alerts](https://github.com/apache/airflow/security/dependabot) and
code [scanning alerts](https://github.com/apache/airflow/security/code-scanning) should be reviewed
and security team should be pinged to review and resolve them.
# Prepare the Apache Airflow Helm Chart Release Candidate
## Pre-requisites
- Helm version == 3.14.0
- towncrier version == 23.11.0
- The `helm gpg` plugin to sign the chart. It can be found at: https://github.com/technosophos/helm-gpg
Instructions for installing the pre-requisites are explained in the steps below.
## Set environment variables
- Set environment variables
```shell
# Set Version
export VERSION=1.1.0
export VERSION_SUFFIX=rc1
# Set AIRFLOW_REPO_ROOT to the path of your git repo
export AIRFLOW_REPO_ROOT=$(pwd -P)
# Example after cloning
git clone https://github.com/apache/airflow.git airflow
cd airflow
export AIRFLOW_REPO_ROOT=$(pwd -P)
```
## Setup k8s environment (mainly helm chart)
This will install Helm in the recent version and enter the environment where you can run `helm` commands
and installs the necessary python dependencies (including `towncrier` in the k8s virtual environment).
```shell
breeze k8s setup-env
breeze k8s shell
```
Install the helm-gpg plugin, if you have not installed it already. This command will install the plugin
using commit sha of version of the plugin that is known to work with latest openssl and reviewed by us.
```shell
helm plugin install https://github.com/technosophos/helm-gpg --version 6303407eb63deaeb1b2f24de611e3468a27ec05b
```
You can also update/uninstall/list the plugin with other `helm plugin` commands. For more information, run:
```shell
helm plugin --help
```
## Build Release Notes
Before creating the RC, you need to build and commit the release notes for the release:
Preview with:
```shell script
towncrier build --draft --version=${VERSION} --date=2021-12-15 --dir chart --config chart/newsfragments/config.toml
```
Then remove the `--draft` flag to have towncrier build the release notes for real.
The significant changes section does require some reformatting - look at prior releases as an example.
If no significant changes were added in this release, add the header and put "No significant changes.".
Then, get the rest of the entries, categorize them into the appropriate sections, and add it to the release notes.
``` shell script
git log --oneline helm-chart/1.1.0..main --pretty='format:- %s' -- chart/ docs/helm-chart/
```
### Add changelog annotations to `Chart.yaml`
Once the release notes have been built, run the script to generate the changelog annotations.
```shell
./dev/chart/build_changelog_annotations.py
```
Verify the output looks right (only entries from this release), then put them in `Chart.yaml`, for example:
```yaml
annotations:
artifacthub.io/changes: |
- kind: added
description: Add resources for `cleanup` and `createuser` jobs
links:
- name: "#19263"
url: https://github.com/apache/airflow/pull/19263
```
Make sure that all the release notes changes are submitted as a PR and merged. Changes in release notes should
also automatically trigger (via `prek`) an update of the [reproducible_build.yaml](../chart/reproducible_build.yaml)
file, which is used to reproducibly build the chart package and source tarball.
You can leave the k8s environment now:
```shell
exit
```
## Update minimum version of Kubernetes
The minimum version of Kubernetes should be updated according to
https://github.com/apache/airflow/blob/main/README.md#requirements in two places:
* [../../helm-chart/README.md](../chart/README.md)
* [../docs/helm-chart/index.rst](../docs/helm-chart/index.rst)
## Build RC artifacts
The Release Candidate artifacts we vote upon should be the exact ones we eventually release,
without any modification other than renaming – i.e. the contents of the files must be
the same between the voted release candidate and the final release.
Because of this the version in the built artifacts that will become the
official Apache releases must not include the rcN suffix.
Make sure you have `apache` remote set up pointing to the apache git repo.
If needed, add it with:
```shell
git remote add apache git@github.com:apache/airflow.git
git fetch apache
```
- We currently release Helm Chart from `main` branch:
```shell
git checkout apache/main
```
- Clean the checkout: (note that this step will also clean any IDE settings you might have so better to
do it in a checked out version you only use for releasing)
```shell
git clean -fxd
rm -rf dist/*
```
- Generate the source tarball:
```shell
breeze release-management prepare-helm-chart-tarball --version ${VERSION} --version-suffix ${VERSION_SUFFIX}
```
Note: The version suffix is only used for the RC tag and tag message. The version in the tarball is without the suffix, so the tarball can be released as-is when the vote passes.
- Generate the binary Helm Chart release:
```shell
VERSION_SUFFIX= breeze release-management prepare-helm-chart-package --sign-email jedcunningham@apache.org
```
Note: we temporarily unset VERSION_SUFFIX when preparing the package, as we do not want it set and the flag defaults to the env var
Warning: you need the `helm gpg` plugin to sign the chart (instructions to install it above)
This should generate two files:
- `dist/airflow-${VERSION}.tgz`
- `dist/airflow-${VERSION}.tgz.prov`
The second one is a provenance file as described in
https://helm.sh/docs/topics/provenance/. It can be used to verify integrity of the Helm chart.
- Generate SHA512/ASC
```shell
pushd ${AIRFLOW_REPO_ROOT}/dist
${AIRFLOW_REPO_ROOT}/dev/sign.sh airflow-*.tgz airflow-*-source.tar.gz
popd
```
- Move the artifacts to ASF dev dist repo, Generate convenience `index.yaml` & Publish them
```shell
# First clone the repo
svn checkout https://dist.apache.org/repos/dist/dev/airflow airflow-dev
# Create new folder for the release
cd airflow-dev/helm-chart
svn mkdir ${VERSION}${VERSION_SUFFIX}
# Move the artifacts to svn folder
mv ${AIRFLOW_REPO_ROOT}/dist/airflow-${VERSION}.tgz* ${VERSION}${VERSION_SUFFIX}/
mv ${AIRFLOW_REPO_ROOT}/dist/airflow-chart-${VERSION}-source.tar.gz* ${VERSION}${VERSION_SUFFIX}/
cd ${VERSION}${VERSION_SUFFIX}
###### Generate index.yaml file - Start
# Download the latest index.yaml on Airflow Website
curl https://airflow.apache.org/index.yaml --output index.yaml
# Replace the URLs from "https://downloads.apache.org" to "https://archive.apache.org"
# as the downloads.apache.org only contains latest releases.
sed -i 's|https://downloads.apache.org/airflow/helm-chart/|https://archive.apache.org/dist/airflow/helm-chart/|' index.yaml
# Generate / Merge the new version with existing index.yaml
helm repo index --merge ./index.yaml . --url "https://dist.apache.org/repos/dist/dev/airflow/helm-chart/${VERSION}${VERSION_SUFFIX}"
###### Generate index.yaml file - End
# Commit the artifacts
svn add *
svn commit -m "Add artifacts for Helm Chart ${VERSION}${VERSION_SUFFIX}"
```
- Remove old Helm Chart versions from the dev repo
```shell
cd ..
export PREVIOUS_VERSION_WITH_SUFFIX=1.0.0rc1
svn rm ${PREVIOUS_VERSION_WITH_SUFFIX}
svn commit -m "Remove old Helm Chart release: ${PREVIOUS_VERSION_WITH_SUFFIX}"
```
- Push Tag for the release candidate
```shell
cd ${AIRFLOW_REPO_ROOT}
git push apache tag helm-chart/${VERSION}${VERSION_SUFFIX}
```
## Publish rc documentation
Documentation is an essential part of the product and should be made available to users.
In our cases, documentation for the released versions is published in S3 bucket, and the site is
kept in a separate repository - [`apache/airflow-site`](https://github.com/apache/airflow-site),
but the documentation source code and build tools are available in the `apache/airflow` repository, so
you need to run several workflows to publish the documentation. More details about it can be found in
[Docs README](../docs/README.md) showing the architecture and workflows including manual workflows for
emergency cases.
There are two steps to publish the documentation:
1. Publish the documentation to the `staging` S3 bucket.
The release manager publishes the documentation using GitHub Actions workflow
[Publish Docs to S3](https://github.com/apache/airflow/actions/workflows/publish-docs-to-s3.yml).
You can specify the RC tag to use to build the docs and 'helm-chart' passed as packages to be built.
The release manager publishes the documentation using GitHub Actions workflow
[Publish Docs to S3](https://github.com/apache/airflow/actions/workflows/publish-docs-to-s3.yml). By
default `auto` selection should publish to the `staging` bucket - based on
the tag you use - pre-release tags go to staging. But you can also override it and specify the destination
manually to be `live` or `staging`.
After that step, the provider documentation should be available under the https://airflow.staged.apache.org
(same as in the helm chart documentation).
2. Invalidate Fastly cache for the documentation.
In order to do it, you need to run the [Build docs](https://github.com/apache/airflow-site/actions/workflows/build.yml)
workflow in `airflow-site` repository. Make sure to use `staging` branch.
After that workflow completes, the new version should be available in the drop-down list and stable links
should be updated and Fastly cache should be invalidated.
## Prepare issue for testing status of rc
Create an issue for the testing status of the RC (PREVIOUS_RELEASE should be the previous release version,
for example 1.4.0).
```shell script
cat <<EOF
Status of testing of Apache Airflow Helm Chart ${VERSION}${VERSION_SUFFIX}
EOF
```
Content is generated with:
```shell
breeze release-management generate-issue-content-helm-chart \
--previous-release helm-chart/<PREVIOUS_RELEASE> --current-release helm-chart/${VERSION}${VERSION_SUFFIX}
```
Copy the URL of the issue.
## Prepare Vote email on the Apache Airflow release candidate
- Send out a vote to the dev@airflow.apache.org mailing list:
Subject:
```shell
cat <<EOF
[VOTE] Release Apache Airflow Helm Chart ${VERSION} based on ${VERSION}${VERSION_SUFFIX}
EOF
```
```shell
export VOTE_END_TIME=$(date --utc -d "now + 72 hours + 10 minutes" +'%Y-%m-%d %H:%M')
export TIME_DATE_URL="to?iso=$(date --utc -d "now + 72 hours + 10 minutes" +'%Y%m%dT%H%M')&p0=136&font=cursive"
```
Body:
```shell
cat <<EOF
Hello Apache Airflow Community,
This is a call for the vote to release Helm Chart version ${VERSION}.
The release candidate is available at:
https://dist.apache.org/repos/dist/dev/airflow/helm-chart/${VERSION}${VERSION_SUFFIX}/
airflow-chart-${VERSION}-source.tar.gz - is the "main source release" that comes with INSTALL instructions.
airflow-${VERSION}.tgz - is the binary Helm Chart release.
Public keys are available at: https://www.apache.org/dist/airflow/KEYS
For convenience "index.yaml" has been uploaded (though excluded from voting), so you can also run the below commands.
helm repo add apache-airflow-dev https://dist.apache.org/repos/dist/dev/airflow/helm-chart/${VERSION}${VERSION_SUFFIX}/
helm repo update
helm install airflow apache-airflow-dev/airflow
airflow-${VERSION}.tgz.prov - is also uploaded for verifying Chart Integrity, though not strictly required for releasing the artifact based on ASF Guidelines.
$ helm gpg verify airflow-${VERSION}.tgz
gpg: Signature made Thu Jan 6 21:33:35 2022 MST
gpg: using RSA key E1A1E984F55B8F280BD9CBA20BB7163892A2E48E
gpg: Good signature from "Jed Cunningham <jedcunningham@apache.org>" [ultimate]
plugin: Chart SHA verified. sha256:b33eac716e0416a18af89fb4fa1043fcfcf24f9f903cda3912729815213525df
The vote will be open for at least 72 hours ($VOTE_END_TIME UTC) or until the necessary number of votes is reached.
https://www.timeanddate.com/countdown/$TIME_DATE_URL
Please vote accordingly:
[ ] +1 approve
[ ] +0 no opinion
[ ] -1 disapprove with the reason
Only votes from PMC members are binding, but members of the community are
encouraged to test the release and vote with "(non-binding)".
Consider this my (binding) +1.
For license checks, the .rat-excludes files is included, so you can run the following to verify licenses (just update your path to rat):
tar -xvf airflow-chart-${VERSION}-source.tar.gz
cd airflow-chart-${VERSION}
java -jar apache-rat-0.13.jar chart -E .rat-excludes
Please note that the version number excludes the \`rcX\` string, so it's now
simply ${VERSION}. This will allow us to rename the artifact without modifying
the artifact checksums when we actually release it.
The status of testing the Helm Chart by the community is kept here:
<TODO COPY LINK TO THE ISSUE CREATED>
Thanks,
<your name>
EOF
```
Note, you need to update the `helm gpg verify` output and verify the end of the voting period in the body.
Note, For RC2/3 you may refer to shorten vote period as agreed in mailing list [thread](https://lists.apache.org/thread/cv194w1fqqykrhswhmm54zy9gnnv6kgm).
# Verify the release candidate by PMC members
The PMC members should verify the releases in order to make sure the release is following the
[Apache Legal Release Policy](http://www.apache.org/legal/release-policy.html).
At least 3 (+1) votes should be recorded in accordance to
[Votes on Package Releases](https://www.apache.org/foundation/voting.html#ReleaseVotes)
The legal checks include:
* checking if the packages are present in the right dist folder on svn
* verifying if all the sources have correct licences
* verifying if release manager signed the releases with the right key
* verifying if all the checksums are valid for the release
## SVN check
The files should be present in the sub-folder of
[Airflow dist](https://dist.apache.org/repos/dist/dev/airflow/)
The following files should be present (7 files):
* `airflow-chart-${VERSION}-source.tar.gz` + .asc + .sha512
* `airflow-${VERSION}.tgz` + .asc + .sha512
* `airflow-${VERSION}.tgz.prov`
## Source tarball reproducibility check
The source tarball should be reproducible. This means that if you build it twice, you should get
the same result. This is important for security reasons, as it ensures that the source code
has not been tampered with.
1. Go to airflow repository root (for example if you cloned it to `../airflow` then `cd ../airflow`)
```shell
cd ../airflow
AIRFLOW_REPO_ROOT=$(pwd -P)
```
2. Set the version of the release you are checking
```shell
VERSION=12.0.1
VERSION_SUFFIX=rc1
VERSION_RC=${VERSION}${VERSION_SUFFIX}
```
3. Check-out the branch from which the release was made and cleanup dist folder:
```shell
git checkout helm-chart/${VERSION_RC}
rm -rf dist/*
```
4. Build the source tarball and package. Since you are not releasing the package, you should ignore version
check and skip tagging. There is no need to specify version as it is stored in Chart.yaml of the rc tag.
```shell
breeze release-management prepare-helm-chart-tarball --version-suffix ${VERSION_SUFFIX} --ignore-version-check --skip-tagging
VERSION_SUFFIX= breeze release-management prepare-helm-chart-package
```
Note: we temporarily unset VERSION_SUFFIX when preparing the package, as we do not want it set and the flag defaults to the env var
5. Compare the produced tarball binary with ones in SVN:
As a PMC member, you should be able to clone the SVN repository:
```shell script
cd ..
[ -d asf-dist ] || svn checkout --depth=immediates https://dist.apache.org/repos/dist asf-dist
svn update --set-depth=infinity asf-dist/dev/airflow
```
Or update it if you already checked it out:
```shell script
cd asf-dist/dev/airflow
svn update .
```
Set an environment variable: SVN_REPO_ROOT to the root of folder where you have asf-dist checked out:
```shell script
cd asf-dist/
export SVN_REPO_ROOT=$(pwd -P)
```
```shell
diff ${AIRFLOW_REPO_ROOT}/dist/airflow-chart-${VERSION}-source.tar.gz ${SVN_REPO_ROOT}/dev/airflow/helm-chart/${VERSION_RC}/airflow-chart-${VERSION}-source.tar.gz
diff ${AIRFLOW_REPO_ROOT}/dist/airflow-${VERSION}.tgz ${SVN_REPO_ROOT}/dev/airflow/helm-chart/${VERSION_RC}/airflow-${VERSION}.tgz
```
There should be no differences reported. If you see "binary files differ" message, it means that
the source tarball is not reproducible. This is not a blocker for the release, you can unpack the sources
of the tarball and compare the two tarballs and check the differences for example
with `diff -R <DIR1> <DIR2>`. It could be that our reproducible build script is not working correctly yet,
and we need to fix it (so checking the differences would be helpful also to find out what is wrong).
Before proceeding next you want to go to the SVN directory
```shell
cd ${SVN_REPO_ROOT}/dev/airflow/helm-chart/${VERSION_RC}
```
## Licence check
You can run this command to do it for you (including checksum verification for your own security):
```shell script
# Checksum value is taken from https://downloads.apache.org/creadur/apache-rat-0.17/apache-rat-0.17-bin.tar.gz.sha512
wget -q https://dlcdn.apache.org//creadur/apache-rat-0.17/apache-rat-0.17-bin.tar.gz -O /tmp/apache-rat-0.17-bin.tar.gz
echo "32848673dc4fb639c33ad85172dfa9d7a4441a0144e407771c9f7eb6a9a0b7a9b557b9722af968500fae84a6e60775449d538e36e342f786f20945b1645294a0 /tmp/apache-rat-0.17-bin.tar.gz" | sha512sum -c -
tar -xzf /tmp/apache-rat-0.17-bin.tar.gz -C /tmp
```
* Unpack the release source archive (the `<package + version>-source.tar.gz` file) to a folder
* Enter the sources folder run the check
```shell
rm -rf /tmp/apache/airflow-src && mkdir -p /tmp/apache-airflow-src && tar -xzf ${SVN_REPO_ROOT}/dev/airflow/helm-chart/${VERSION_RC}/airflow-chart-*-source.tar.gz --strip-components 1 -C /tmp/apache-airflow-src
```
```shell
java -jar /tmp/apache-rat-0.17/apache-rat-0.17.jar --input-exclude-file /tmp/apache-airflow-src/.rat-excludes /tmp/apache-airflow-src/ | grep -E "! |INFO: "
```
where `.rat-excludes` is the file in the root of Chart source code.
You should see no files reported as Unknown or with wrong licence and summary of the check similar to:
```
INFO: Apache Creadur RAT 0.17 (Apache Software Foundation)
INFO: Excluding patterns: .git-blame-ignore-revs, .github/*, .git ...
INFO: Excluding MISC collection.
INFO: Excluding HIDDEN_DIR collection.
SLF4J(W): No SLF4J providers were found.
SLF4J(W): Defaulting to no-operation (NOP) logger implementation
SLF4J(W): See https://www.slf4j.org/codes.html#noProviders for further details.
INFO: RAT summary:
INFO: Approved: 15615
INFO: Archives: 2
INFO: Binaries: 813
INFO: Document types: 5
INFO: Ignored: 2392
INFO: License categories: 2
INFO: License names: 2
INFO: Notices: 216
INFO: Standards: 15609
INFO: Unapproved: 0
INFO: Unknown: 0
```
There should be no files reported as Unknown or Unapproved. The files that are unknown or unapproved should be shown with a line starting with `!`.
For example:
```
! Unapproved: 1 A count of unapproved licenses.
! /CODE_OF_CONDUCT.md
```
## Signature check
Make sure you have imported into your GPG the PGP key of the person signing the release. You can find the valid keys in
[KEYS](https://dist.apache.org/repos/dist/release/airflow/KEYS).
You can import the whole KEYS file:
```shell script
wget https://dist.apache.org/repos/dist/release/airflow/KEYS
gpg --import KEYS
```
You can also import the keys individually from a keyserver. The below one uses Kaxil's key and
retrieves it from the default GPG keyserver
[OpenPGP.org](https://keys.openpgp.org):
```shell script
gpg --keyserver keys.openpgp.org --receive-keys CDE15C6E4D3A8EC4ECF4BA4B6674E08AD7DE406F
```
You should choose to import the key when asked.
Note that, being the default, the OpenPGP server tends to be overloaded often and might respond with
errors or timeouts. Many of the release managers also uploaded their keys to the
[GNUPG.net](https://keys.gnupg.net) keyserver, and you can retrieve keys from there.
```shell script
gpg --keyserver keys.gnupg.net --receive-keys CDE15C6E4D3A8EC4ECF4BA4B6674E08AD7DE406F
```
Once you have the keys, the signatures can be verified by running this:
```shell script
for i in *.asc
do
echo -e "Checking $i\n"; gpg --verify $i
done
```
This should produce results similar to the below. The "Good signature from ..." is indication
that the signatures are correct. Do not worry about the "not certified with a trusted signature"
warning. Most of the certificates used by release managers are self-signed, and that's why you get this
warning. By importing the key either from the server in the previous step or from the
[KEYS](https://dist.apache.org/repos/dist/release/airflow/KEYS) page, you know that
this is a valid key already. To suppress the warning you may edit the key's trust level
by running `gpg --edit-key <key id> trust` and entering `5` to assign trust level `ultimate`.
```
Checking airflow-1.0.0.tgz.asc
gpg: assuming signed data in 'airflow-1.0.0.tgz'
gpg: Signature made Sun 16 May 01:25:24 2021 BST
gpg: using RSA key CDE15C6E4D3A8EC4ECF4BA4B6674E08AD7DE406F
gpg: issuer "kaxilnaik@apache.org"
gpg: Good signature from "Kaxil Naik <kaxilnaik@apache.org>" [unknown]
gpg: aka "Kaxil Naik <kaxilnaik@gmail.com>" [unknown]
gpg: WARNING: The key's User ID is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: CDE1 5C6E 4D3A 8EC4 ECF4 BA4B 6674 E08A D7DE 406F
Checking airflow-chart-1.0.0-source.tar.gz.asc
gpg: assuming signed data in 'airflow-chart-1.0.0-source.tar.gz'
gpg: Signature made Sun 16 May 02:24:09 2021 BST
gpg: using RSA key CDE15C6E4D3A8EC4ECF4BA4B6674E08AD7DE406F
gpg: issuer "kaxilnaik@apache.org"
gpg: Good signature from "Kaxil Naik <kaxilnaik@apache.org>" [unknown]
gpg: aka "Kaxil Naik <kaxilnaik@gmail.com>" [unknown]
gpg: WARNING: The key's User ID is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: CDE1 5C6E 4D3A 8EC4 ECF4 BA4B 6674 E08A D7DE 406F
```
## SHA512 sum check
Run this:
```shell
for i in *.sha512
do
echo "Checking $i"; shasum -a 512 `basename $i .sha512 ` | diff - $i
done
```
You should get output similar to:
```
Checking airflow-1.0.0.tgz.sha512
Checking airflow-chart-1.0.0-source.tar.gz.sha512
```
# Verify release candidates by Contributors
Contributors can run below commands to test the Helm Chart
```shell
helm repo add apache-airflow-dev https://dist.apache.org/repos/dist/dev/airflow/helm-chart/1.1.0rc1/
helm repo update
helm install airflow apache-airflow-dev/airflow
```
You can then perform any other verifications to check that it works as you expected by
upgrading the Chart or installing by overriding default of `values.yaml`.
# Publish the final release
## Summarize the voting for the release
Once the vote has been passed, you will need to send a result vote to dev@airflow.apache.org:
Subject:
```
[RESULT][VOTE] Release Apache Airflow Helm Chart 1.1.0 based on 1.1.0rc1
```
Message:
```
Hello all,
The vote to release Apache Airflow Helm Chart version 1.1.0 based on 1.1.0rc1 is now closed.
The vote PASSED with 4 binding "+1", 4 non-binding "+1" and 0 "-1" votes:
"+1" Binding votes:
- Kaxil Naik
- Jarek Potiuk
- Ash Berlin-Taylor
- Xiaodong Deng
"+1" Non-Binding votes:
- Jed Cunningham
- Ephraim Anierobi
- Dennis Akpenyi
- Ian Stanton
Vote thread:
https://lists.apache.org/thread.html/r865f041e491a2a7a52e17784abf0d0f2e35c3bac5ae8a05927285558%40%3Cdev.airflow.apache.org%3E
I'll continue with the release process and the release announcement will follow shortly.
Thanks,
<your name>
```
## Publish release to SVN
You need to migrate the RC artifacts that passed to this repository:
https://dist.apache.org/repos/dist/release/airflow/helm-chart/
(The migration should include renaming the files so that they no longer have the RC number in their filenames.)
The best way of doing this is to svn cp between the two repos (this avoids having to upload
the binaries again, and gives a clearer history in the svn commit logs):
```shell
# First clone the repo
export VERSION=1.1.0
export VERSION_SUFFIX=rc1
svn checkout https://dist.apache.org/repos/dist/release/airflow airflow-release
# Create new folder for the release
cd airflow-release/helm-chart
export AIRFLOW_SVN_RELEASE_HELM=$(pwd -P)
svn mkdir ${VERSION}
cd ${VERSION}
# Move the artifacts to svn folder & commit (don't copy or copy & remove - index.yaml)
for f in ../../../airflow-dev/helm-chart/${VERSION}${VERSION_SUFFIX}/*; do svn cp "$f" .; done
svn rm index.yaml
svn commit -m "Release Airflow Helm Chart Check ${VERSION} from ${VERSION}${VERSION_SUFFIX}"
```
Verify that the packages appear in [Airflow Helm Chart](https://dist.apache.org/repos/dist/release/airflow/helm-chart/).
## Publish release tag
Create and push the release tag:
```shell
cd "${AIRFLOW_REPO_ROOT}"
git checkout helm-chart/${VERSION}${VERSION_SUFFIX}
git tag -s helm-chart/${VERSION} -m "Apache Airflow Helm Chart ${VERSION}"
git push apache helm-chart/${VERSION}
```
## Publish final documentation
Documentation is an essential part of the product and should be made available to users.
In our cases, documentation for the released versions is published in S3 bucket, and the site is
kept in a separate repository - [`apache/airflow-site`](https://github.com/apache/airflow-site),
but the documentation source code and build tools are available in the `apache/airflow` repository, so
you need to run several workflows to publish the documentation. More details about it can be found in
[Docs README](../docs/README.md) showing the architecture and workflows including manual workflows for
emergency cases.
You should use the `breeze` command to publish the documentation.
The command does the following:
1. Triggers [Publish Docs to S3](https://github.com/apache/airflow/actions/workflows/publish-docs-to-s3.yml).
2. Triggers workflow in apache/airflow-site to refresh
3. Triggers S3 to GitHub Sync
```shell script
breeze workflow-run publish-docs --ref <tag> --site-env <staging/live/auto> helm-chart
```
The `--ref` parameter should be the tag of the release candidate you are publishing. This should be a
release tag like `helm-chart/1.1.0`
The `--site-env` parameter should be set to `staging` for pre-release versions or `live` for final releases.
The default option is `auto` which should automatically select the right environment based on the tag name.
Other available parameters can be found with:
```shell script
breeze workflow-run publish-docs --help
```
## Update `index.yaml` in airflow-site
Regenerate `index.yaml` so it can be added to the Airflow website to allow: `helm repo add apache-airflow https://airflow.apache.org`.
```shell
git clone https://github.com/apache/airflow-site.git airflow-site
cd airflow-site
curl https://dist.apache.org/repos/dist/dev/airflow/helm-chart/${VERSION}${VERSION_SUFFIX}/index.yaml -o index.yaml
cp ${AIRFLOW_SVN_RELEASE_HELM}/${VERSION}/airflow-${VERSION}.tgz .
helm repo index --merge ./index.yaml . --url "https://downloads.apache.org/airflow/helm-chart/${VERSION}"
rm airflow-${VERSION}.tgz
mv index.yaml landing-pages/site/static/index.yaml
git add -p . # leave the license at the top
git commit -m "Add Apache Airflow Helm Chart Release ${VERSION} to chart index file"
git push
# and finally open a PR
```
## Wait for ArtifactHUB to discover new release
As we link out to ArtifactHUB in all of our release communications, we now wait until ArtifactHUB has discovered the new release. This can take 30 minutes or so to happen after the index change PR from above is merged.
## Notify developers of release
- Notify users@airflow.apache.org (cc'ing dev@airflow.apache.org) that
the artifacts have been published:
Subject:
```shell
cat <<EOF
[ANNOUNCE] Apache Airflow Helm Chart version ${VERSION} Released
EOF
```
Body:
```shell
cat <<EOF
Dear Airflow community,
I am pleased to announce that we have released Apache Airflow Helm chart ${VERSION} 🎉 🎊
The source release, as well as the "binary" Helm Chart release, are available:
📦 Official Sources: https://airflow.apache.org/docs/helm-chart/${VERSION}/installing-helm-chart-from-sources.html
📦 ArtifactHub: https://artifacthub.io/packages/helm/apache-airflow/airflow
📚 Docs: https://airflow.apache.org/docs/helm-chart/${VERSION}/
🚀 Quick Start Installation Guide: https://airflow.apache.org/docs/helm-chart/${VERSION}/quick-start.html
🛠️ Release Notes: https://airflow.apache.org/docs/helm-chart/${VERSION}/release_notes.html
Thanks to all the contributors who made this possible.
Cheers,
<your name>
EOF
```
Send the same email to announce@apache.org, except change the opening line to `Dear community,`.
It is more reliable to send it via the web ui at https://lists.apache.org/list.html?announce@apache.org
(press "c" to compose a new thread)
## Send announcements about security issues fixed in the release
The release manager should review and mark as READY all the security issues fixed in the release.
Such issues are marked as affecting `< <JUST_RELEASED_VERSION>` in the CVE management tool
at https://cveprocess.apache.org/. Then the release manager should announce the issues via the tool.
Once announced, each of the issue should be linked with a 'reference' with tag 'vendor advisory' with the
URL to the announcement published automatically by the CVE management tool.
Note that announce@apache.org is moderated, so the link to the email thread will not be published
immediately. That is why it is recommended to add the link to users@airflow.apache.org, where messages
usually appear a few seconds after the CVE tool sends them.
The ASF Security will be notified and will submit to the CVE project and will set the state to 'PUBLIC'.
## Add release data to Apache Committee Report Helper
Add the release data (version and date) at: https://reporter.apache.org/addrelease.html?airflow
## Update Announcements page
Update "Announcements" page at the [Official Airflow website](https://airflow.apache.org/announcements/)
## Create release on GitHub
Create a new release on GitHub with the release notes and assets from the release svn.
## Close the milestone
Before closing the milestone on GitHub, make sure that all PR marked for it are either part of the release (was cherry picked) or
postponed to the next release, then close the milestone. Create the next one if it hasn't been already (it probably has been).
Update the new milestone in the [*Currently we are working on* issue](https://github.com/apache/airflow/issues/10176)
make sure to update the last updated timestamp as well.
## Close the testing status issue
Don't forget to thank the folks who tested and close the issue tracking the testing status.
## Update issue template with the new release
Updating issue templates in `.github/ISSUE_TEMPLATE/4-airflow_helmchart_bug_report.yml` with the new version
## Announce the release on the community slack
Post this in the #announce channel:
```shell
cat <<EOF
We've just released Apache Airflow Helm Chart ${VERSION} 🎉
📦 ArtifactHub: https://artifacthub.io/packages/helm/apache-airflow/airflow
📚 Docs: https://airflow.apache.org/docs/helm-chart/${VERSION}/
🚀 Quick Start Installation Guide: https://airflow.apache.org/docs/helm-chart/${VERSION}/quick-start.html
🛠 Release Notes: https://airflow.apache.org/docs/helm-chart/${VERSION}/release_notes.html
Thanks to all the contributors who made this possible.
EOF
```
## Announce about the release in social media
------------------------------------------------------------------------------------------------------------
Announcement is done from official Apache-Airflow accounts.
* LinkedIn: https://www.linkedin.com/company/apache-airflow/
* Fosstodon: https://fosstodon.org/@airflow
* Bluesky: https://bsky.app/profile/apache-airflow.bsky.social
Make sure to attach the release image generated with Figma to the post.
If you don't have access to the account ask a PMC member to post.
------------------------------------------------------------------------------------------------------------
Tweet and post on Linkedin about the release:
```shell
cat <<EOF
We've just released Apache Airflow Helm chart ${VERSION} 🎉
📦 ArtifactHub: https://artifacthub.io/packages/helm/apache-airflow/airflow
📚 Docs: https://airflow.apache.org/docs/helm-chart/${VERSION}/
🛠️ Release Notes: https://airflow.apache.org/docs/helm-chart/${VERSION}/release_notes.html
Thanks to all the contributors who made this possible.
EOF
```
## Bump chart version in Chart.yaml
Bump the chart version to the next version in `chart/Chart.yaml` in main.
## Remove old releases
We should keep the old version a little longer than a day or at least until the updated
``index.yaml`` is published. This is to avoid errors for users who haven't run ``helm repo update``.
It is probably ok if we leave last 2 versions on release svn repo too.
```shell
# http://www.apache.org/legal/release-policy.html#when-to-archive
cd airflow-release/helm-chart
export PREVIOUS_VERSION=1.0.0
svn rm ${PREVIOUS_VERSION}
svn commit -m "Remove old Helm Chart release: ${PREVIOUS_VERSION}"
```
# Additional processes
## Fixing released documentation
Sometimes we want to rebuild the documentation with some fixes that were merged in main
branch, for example when there are html layout changes or typo fixes, or formatting issue fixes.
In this case the process is as follows:
* When you want to re-publish `helm-chart/X.Y.Z` docs, create (or pull if already created)
`helm-chart/X.Y.Z-docs` branch
* Cherry-pick changes you want to add and push to the main `apache/airflow` repo
* Run the publishing workflow.
In case you are releasing latest released version of helm-chart (which should be most of the cases), run this:
```bash
breeze workflow-run publish-docs --site-env live --ref helm-chart/X.Y.Z-docs \
--skip-tag-validation \
helm-chart
```
In case you are releasing an older version of helm-chart, you should skip writing to the stable folder
```bash
breeze workflow-run publish-docs --site-env live --ref helm-chart/X.Y.Z-docs \
--skip-tag-validation \
--skip-write-to-stable-folder \
helm-chart
```
|
unknown
|
github
|
https://github.com/apache/airflow
|
dev/README_RELEASE_HELM_CHART.md
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Langford's number problem in Google CP Solver.
Langford's number problem (CSP lib problem 24)
http://www.csplib.org/prob/prob024/
'''
Arrange 2 sets of positive integers 1..k to a sequence,
such that, following the first occurrence of an integer i,
each subsequent occurrence of i appears i+1 indices later
than the last.
For example, for k=4, a solution would be 41312432
'''
* John E. Miller: Langford's Problem
http://www.lclark.edu/~miller/langford.html
* Encyclopedia of Integer Sequences for the number of solutions for each k
http://www.research.att.com/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=014552
Also, see the following models:
* MiniZinc: http://www.hakank.org/minizinc/langford2.mzn
* Gecode/R: http://www.hakank.org/gecode_r/langford.rb
* ECLiPSe: http://hakank.org/eclipse/langford.ecl
* SICStus: http://hakank.org/sicstus/langford.pl
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
def main(k=8, num_sol=0):
  """Solve Langford's number problem (CSPLib prob024) with the CP solver.

  Arrange two copies of each integer 1..k into a sequence of length 2*k so
  that the second occurrence of i appears exactly i+1 indices after the
  first (e.g. for k=4 a solution is 41312432).

  Args:
    k: size of the integer set; the sequence has length 2*k.
    num_sol: stop after this many solutions (0 means enumerate all).
  """
  # Create the solver.
  solver = pywrapcp.Solver("Langford")

  #
  # data
  #
  print("k:", k)
  p = list(range(2 * k))

  #
  # declare variables
  #
  # position[i-1]   : index of the first occurrence of value i
  # position[i+k-1] : index of the second occurrence of value i
  position = [solver.IntVar(0, 2 * k - 1, "position[%i]" % i) for i in p]
  # solution[j]: the value placed at index j of the sequence.
  # (Fixed: these variables were mislabeled "position[%i]" — a copy/paste
  # slip that made solver debug output ambiguous.)
  solution = [solver.IntVar(1, k, "solution[%i]" % i) for i in p]

  #
  # constraints
  #
  solver.Add(solver.AllDifferent(position))

  for i in range(1, k + 1):
    # The second occurrence of i is exactly i+1 indices after the first.
    solver.Add(position[i + k - 1] == position[i - 1] + i + 1)
    # Channel positions to the value sequence.
    solver.Add(solver.Element(solution, position[i - 1]) == i)
    solver.Add(solver.Element(solution, position[k + i - 1]) == i)

  # symmetry breaking: rule out the mirror image of each solution
  solver.Add(solution[0] < solution[2 * k - 1])

  #
  # search and result
  #
  db = solver.Phase(position,
                    solver.CHOOSE_FIRST_UNBOUND,
                    solver.ASSIGN_MIN_VALUE)

  solver.NewSearch(db)
  num_solutions = 0
  while solver.NextSolution():
    print("solution:", ",".join([str(solution[i].Value()) for i in p]))
    num_solutions += 1
    if num_sol > 0 and num_solutions >= num_sol:
      break

  solver.EndSearch()

  print()
  print("num_solutions:", num_solutions)
  print("failures:", solver.Failures())
  print("branches:", solver.Branches())
  print("WallTime:", solver.WallTime())
# Default problem parameters; both can be overridden via sys.argv
# (first argument: k, second argument: num_sol).
k = 8
num_sol = 0

if __name__ == "__main__":
  args = sys.argv[1:]
  if args:
    k = int(args[0])
  if len(args) > 1:
    num_sol = int(args[1])
  main(k, num_sol)
|
unknown
|
codeparrot/codeparrot-clean
| ||
function Component(props) {
  // `x` starts out as a locally created, still-mutable object.
  let x = {};
  // onChange should be inferred as immutable, because the value
  // it captures (`x`) is frozen by the time the function is referenced
  const onChange = e => {
    maybeMutate(x, e.target.value);
  };
  if (props.cond) {
    // `x` is rendered into JSX here, freezing it.
    <div>{x}</div>;
  }
  // onChange is first referenced only after `x` has escaped into JSX.
  return <Foo value={x} onChange={onChange} />;
}
|
javascript
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/function-expression-captures-value-later-frozen-jsx.js
|
import os
import sys
import magic
import re
import logging
import subprocess
from time import sleep
import RPi.GPIO as GPIO
from utils import findBin, whichUSBboard, getBoardConfigs
from target import Target
from pin import Pin
class NAsatbus(Target):
    """ Return an instance of Target specific to the NanoAvionics Satbus
    board actually requires a physical programmer, as it lacks a USB
    interface to an integrated programmer. As such, it is quite different
    physically from the other boards, but once hooked up to a physical
    programming interface like some variant of the ST-LINK v2, it should
    behave well.
    Of note, the board can be programmed with any of the STM32F4 Discovery
    board programmers (They are ST-Link V2-compatible).
    """

    def __init__(self):
        # Board identity and architecture used by the flashing tooling.
        self.board = "na-satbus-3c0-gcc"
        self.arch = "ARM"
        self.cpu = "stm32f405"
        self.binfiletype = "ELF"
        # GPIO pins wired to the target's reset and power lines.
        self.pins = {
            'rst': Pin(name='rst', number=17),
            'pwr': Pin(name='pwr', number=27)
        }

    # IMPORTANT NOTE: openocd must be version 0.9 or later.
    def flash(self, binobj):
        """
        Use an external shell to push the ELF file using openocd. It seems
        to be necessary to pre-declare the LIB PATH for some commands, and
        if the path variable is not available as declared in /etc/profile, it
        can be fixed here with the sp1 variable, below. HOWEVER: from ansible,
        the locally-declared and locally-requested path variables DO NOT WORK
        and cause ERRORS. Workaround: use the ansible -shell- command and
        declare the library path before executing a bash -c command.
        IMPORTANT NOTE: openocd must be version 0.9 or later.

        Returns True when the openocd invocation succeeds, False otherwise.
        """
        logging.info("Initiating binary file flash.")
        if not self.sanitycheck(binobj):
            logging.error("Binary file didn't pass a sanity check.")
            return False

        # TODO set all of these via Ansible, and get these vars from os.environ
        distpath = os.environ['KUBOS_LIB_PATH']
        configfiles = "../../flash/openocd"
        searchpath = str("%s/%s" % (distpath, configfiles))
        # Build an LD_LIBRARY_PATH that also covers the dist and config dirs.
        sp1 = os.environ['LD_LIBRARY_PATH']
        sp1 = str(sp1 + ":" + distpath)
        sp1 = str(sp1 + ":" + searchpath)

        # will dfu-util work instead?
        openocdloc = findBin('openocd')

        # NOTE(review): unamestr is currently unused; it is kept for the
        # pending OS X path adjustment (TODO below). decode() keeps this
        # working on Python 3, where check_output returns bytes.
        unamestr = subprocess.check_output('uname').decode()
        unamestr = re.sub('\n$', '', unamestr)
        # TODO adjust the paths for OS X

        # At present, this function only expects one board to be attached. TODO
        boards = whichUSBboard()
        configs = getBoardConfigs(boards)
        cfg = configs[2]  # config file to use with openocd
        cmd = configs[3]  # something like 'stm32f4_flash', an openocd command
        fileloc = binobj.abspath()

        # $openocd -f $this_dir/$cfg -s $search_path -c "$cmd $file"
        command = str("%s -f %s/%s -s %s -c \"%s %s\"") % (openocdloc,
                                                           searchpath, cfg,
                                                           searchpath, cmd,
                                                           fileloc)
        logging.info("Attempting to flash the binary file to the target board.")
        logging.debug("Flashing the binary with:\n\n%s" % str(command))
        try:
            subprocess.check_output(command, shell=True)
            return True
        except (subprocess.CalledProcessError, OSError):
            # The bare 'except:' used here previously swallowed *every*
            # exception (including KeyboardInterrupt); catch only failures
            # from running the openocd command, and log them.
            logging.exception("openocd flash command failed")
            return False
#<EOF>
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# (c) 2016, Marcin Skarbek <github@skarbek.name>
# (c) 2016, Andreas Olsson <andreas@arrakis.se>
# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
#
# This module was ported from https://github.com/mskarbek/ansible-nsupdate
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsupdate
short_description: Manage DNS records.
description:
- Create, update and remove DNS records using DDNS updates
- DDNS works well with both bind and Microsoft DNS (see https://technet.microsoft.com/en-us/library/cc961412.aspx)
version_added: "2.3"
requirements:
- dnspython
author: "Loic Blot (@nerzhul)"
options:
state:
description:
- Manage DNS record.
choices: ['present', 'absent']
default: 'present'
server:
description:
- Apply DNS modification on this server.
required: true
port:
description:
- Use this TCP port when connecting to C(server).
default: 53
version_added: 2.5
key_name:
description:
- Use TSIG key name to authenticate against DNS C(server)
key_secret:
description:
- Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
key_algorithm:
description:
- Specify key algorithm used by C(key_secret).
choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hamc-sha384',
'hmac-sha512']
default: 'hmac-md5'
zone:
description:
- DNS record will be modified on this C(zone).
required: true
record:
description:
- Sets the DNS record to modify.
required: true
type:
description:
- Sets the record type.
default: 'A'
ttl:
description:
- Sets the record TTL.
default: 3600
value:
description:
- Sets the record value.
default: None
'''
EXAMPLES = '''
- name: Add or modify ansible.example.org A to 192.168.1.1"
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
zone: "example.org"
record: "ansible"
value: "192.168.1.1"
- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3"
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
zone: "example.org"
record: "ansible"
value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
- name: Remove puppet.example.org CNAME
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
zone: "example.org"
record: "puppet"
type: "CNAME"
state: absent
'''
RETURN = '''
changed:
description: If module has modified record
returned: success
type: string
record:
description: DNS record
returned: success
type: string
sample: 'ansible'
ttl:
description: DNS record TTL
returned: success
type: int
sample: 86400
type:
description: DNS record type
returned: success
type: string
sample: 'CNAME'
value:
description: DNS record value(s)
returned: success
type: list
sample: '192.168.1.1'
zone:
description: DNS record zone
returned: success
type: string
sample: 'example.org.'
dns_rc:
description: dnspython return code
returned: always
type: int
sample: 4
dns_rc_str:
description: dnspython return code (string representation)
returned: always
type: string
sample: 'REFUSED'
'''
from binascii import Error as binascii_error
from socket import error as socket_error

# dnspython is an optional dependency: import lazily so main() can report a
# clean, actionable error instead of crashing at import time when missing.
try:
    import dns.update
    import dns.query
    import dns.tsigkeyring
    import dns.message
    import dns.resolver
    HAVE_DNSPYTHON = True
except ImportError:
    HAVE_DNSPYTHON = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class RecordManager(object):
    """Build and send RFC 2136 dynamic DNS updates for a single record.

    Wraps dnspython's update/query machinery. All failures are reported
    through ``module.fail_json()``; the last dnspython rcode observed is
    kept in ``self.dns_rc`` (0 means NOERROR).
    """

    def __init__(self, module):
        self.module = module
        # dnspython expects a fully-qualified (dot-terminated) zone name.
        if module.params['zone'][-1] != '.':
            self.zone = module.params['zone'] + '.'
        else:
            self.zone = module.params['zone']

        if module.params['key_name']:
            try:
                self.keyring = dns.tsigkeyring.from_text({
                    module.params['key_name']: module.params['key_secret']
                })
            except TypeError:
                # from_text raises TypeError when the secret is None.
                module.fail_json(msg='Missing key_secret')
            except binascii_error as e:
                # The secret was not valid base64.
                module.fail_json(msg='TSIG key error: %s' % to_native(e))
        else:
            self.keyring = None

        # dnspython wants the canonical name for the HMAC-MD5 algorithm.
        if module.params['key_algorithm'] == 'hmac-md5':
            self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
        else:
            self.algorithm = module.params['key_algorithm']

        self.dns_rc = 0

    def __do_update(self, update):
        """Send the prepared update over TCP; fail_json on TSIG/transport errors."""
        response = None
        try:
            response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
        except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
            self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
        except (socket_error, dns.exception.Timeout) as e:
            self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
        return response

    def __add_value_entries(self, update):
        """Append one 'add' operation per configured value.

        Shared by create_record() and modify_record(), which previously
        duplicated this loop verbatim.
        """
        for entry in self.module.params['value']:
            try:
                update.add(self.module.params['record'],
                           self.module.params['ttl'],
                           self.module.params['type'],
                           entry)
            except AttributeError:
                # value is None -> iterating/adding fails with AttributeError.
                self.module.fail_json(msg='value needed when state=present')
            except dns.exception.SyntaxError:
                self.module.fail_json(msg='Invalid/malformed value')

    def create_or_update_record(self):
        """Ensure the record exists with the configured values; return a result dict."""
        result = {'changed': False, 'failed': False}
        exists = self.record_exists()
        # exists: 0 = missing, 1 = present and matching, 2 = present but different.
        if exists in [0, 2]:
            if self.module.check_mode:
                self.module.exit_json(changed=True)

            if exists == 0:
                self.dns_rc = self.create_record()
                if self.dns_rc != 0:
                    result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc

            elif exists == 2:
                self.dns_rc = self.modify_record()
                if self.dns_rc != 0:
                    result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc

            if self.dns_rc != 0:
                result['failed'] = True
            else:
                result['changed'] = True

        else:
            result['changed'] = False

        return result

    def create_record(self):
        """Add the record with all configured values; return the server rcode."""
        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
        self.__add_value_entries(update)
        response = self.__do_update(update)
        return dns.message.Message.rcode(response)

    def modify_record(self):
        """Replace the record's rdata with the configured values; return the rcode."""
        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
        # Delete the existing rrset first, then re-add the desired values.
        update.delete(self.module.params['record'], self.module.params['type'])
        self.__add_value_entries(update)
        response = self.__do_update(update)
        return dns.message.Message.rcode(response)

    def remove_record(self):
        """Delete the record if present; return a result dict."""
        result = {'changed': False, 'failed': False}

        if self.record_exists() == 0:
            return result

        # Check mode and record exists, declared fake change.
        if self.module.check_mode:
            self.module.exit_json(changed=True)

        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
        update.delete(self.module.params['record'], self.module.params['type'])

        response = self.__do_update(update)
        self.dns_rc = dns.message.Message.rcode(response)

        if self.dns_rc != 0:
            result['failed'] = True
            result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
        else:
            result['changed'] = True

        return result

    def record_exists(self):
        """Probe the server with prerequisite-only updates.

        Returns 0 if the record is missing, 1 if present (and matching the
        configured values when state=present), 2 if present with different
        values.
        """
        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
        try:
            update.present(self.module.params['record'], self.module.params['type'])
        except dns.rdatatype.UnknownRdatatype as e:
            self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))

        response = self.__do_update(update)
        self.dns_rc = dns.message.Message.rcode(response)
        if self.dns_rc == 0:
            if self.module.params['state'] == 'absent':
                return 1
            # Second probe: does the record also match every configured value?
            for entry in self.module.params['value']:
                try:
                    update.present(self.module.params['record'], self.module.params['type'], entry)
                except AttributeError:
                    self.module.fail_json(msg='value needed when state=present')
                except dns.exception.SyntaxError:
                    self.module.fail_json(msg='Invalid/malformed value')
            response = self.__do_update(update)
            self.dns_rc = dns.message.Message.rcode(response)
            if self.dns_rc == 0:
                return 1
            else:
                return 2
        else:
            return 0
def main():
    """Entry point: parse module arguments and apply the requested DNS change."""
    # TSIG algorithms accepted by dnspython. 'hamc-sha384' is a historical
    # typo that is kept so existing playbooks do not break; the correctly
    # spelled 'hmac-sha384' is now accepted as well.
    tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224',
                 'hmac-sha256', 'hmac-sha384', 'hamc-sha384', 'hmac-sha512']

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
            server=dict(required=True, type='str'),
            port=dict(required=False, default=53, type='int'),
            key_name=dict(required=False, type='str'),
            key_secret=dict(required=False, type='str', no_log=True),
            key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
            zone=dict(required=True, type='str'),
            record=dict(required=True, type='str'),
            type=dict(required=False, default='A', type='str'),
            ttl=dict(required=False, default=3600, type='int'),
            value=dict(required=False, default=None, type='list')
        ),
        supports_check_mode=True
    )

    if not HAVE_DNSPYTHON:
        module.fail_json(msg='python library dnspython required: pip install dnspython')

    if len(module.params["record"]) == 0:
        module.fail_json(msg='record cannot be empty.')

    record = RecordManager(module)
    result = {}
    if module.params["state"] == 'absent':
        result = record.remove_record()
    elif module.params["state"] == 'present':
        result = record.create_or_update_record()

    # Always report the last dnspython return code, numeric and symbolic.
    result['dns_rc'] = record.dns_rc
    result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)

    if result['failed']:
        module.fail_json(**result)
    else:
        result['record'] = dict(zone=record.zone,
                                record=module.params['record'],
                                type=module.params['type'],
                                ttl=module.params['ttl'],
                                value=module.params['value'])
        module.exit_json(**result)


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.standalone.fir.test.configurators
import com.intellij.mock.MockProject
import com.intellij.openapi.Disposable
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestServiceRegistrar
import org.jetbrains.kotlin.test.services.TestServices
/**
* Registers services specific to Standalone mode *tests*, in addition to the Standalone production services registered by
* [FirStandaloneServiceRegistrar][org.jetbrains.kotlin.analysis.api.standalone.base.projectStructure.FirStandaloneServiceRegistrar].
*/
object StandaloneModeTestServiceRegistrar : AnalysisApiTestServiceRegistrar() {
    override fun registerProjectModelServices(project: MockProject, disposable: Disposable, testServices: TestServices) {
        // No additional project-model test services are registered here at the
        // moment; this override only satisfies the AnalysisApiTestServiceRegistrar
        // contract for Standalone mode.
    }
}
|
kotlin
|
github
|
https://github.com/JetBrains/kotlin
|
analysis/analysis-api-standalone/testFixtures/org/jetbrains/kotlin/analysis/api/standalone/fir/test/configurators/StandaloneModeTestServiceRegistrar.kt
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier used to exercise the cross-validation machinery."""

    def __init__(self, a=0, allow_nd=False):
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """Record the dummy fit params and sanity-check the array-like ones.

        The dummy arguments exist to verify that cross-validation can pass
        non-array arguments through to fit, namely:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        elif X.ndim >= 3:
            raise ValueError('X cannot be d')

        n_samples = X.shape[0]
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == n_samples,
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        n_samples))
        if class_prior is not None:
            n_classes = len(np.unique(y))
            assert_true(class_prior.shape[0] == n_classes,
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        n_classes))
        if sparse_sample_weight is not None:
            msg = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == n_samples,
                        msg.format(sparse_sample_weight.shape[0], n_samples))
        if sparse_param is not None:
            msg = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        msg.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        """Return the first column of T (after an optional 2-D reshape)."""
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        """Deterministic score that depends only on the ``a`` parameter."""
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures: 10 two-feature samples and their sparse counterpart.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# Single-entry sparse sample-weight column vector of shape (10, 1).
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
# 5x5 identity; MockClassifier.fit compares sparse_param.shape against it.
P_sparse = coo_matrix(np.eye(5))

# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert train/test are disjoint and, optionally, cover all indices."""
    # Python sets give more informative assertion failure messages.
    train_set = set(train)
    test_set = set(test)

    # The two sides of the split must not share any index.
    assert_equal(train_set.intersection(test_set), set())

    if n_samples is not None:
        # Together, train and test must cover every sample index.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Check that iterating ``cv`` yields valid splits whose test folds
    jointly cover every sample at least once."""
    if expected_n_iter is not None:
        assert_equal(len(cv), expected_n_iter)
    else:
        expected_n_iter = len(cv)
    seen_test_indices = set()
    n_iterations = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_iterations += 1
        seen_test_indices.update(test)
    # The iterator must produce exactly as many splits as it advertises.
    assert_equal(n_iterations, expected_n_iter)
    if n_samples is not None:
        # Accumulated test indices must cover the whole dataset.
        assert_equal(seen_test_indices, set(range(n_samples)))
def test_kfold_valueerrors():
    """Argument validation and warning behaviour of (Stratified)KFold."""
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)
    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 3]
    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
    # Check that errors are raised if all n_labels for individual
    # classes are less than n_folds.
    y = [3, 3, -1, -1, 2]
    assert_raises(ValueError, cval.StratifiedKFold, y, 3)
    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    error_string = ("k-fold cross validation requires at least one"
                    " train / test split")
    assert_raise_message(ValueError, error_string,
                         cval.StratifiedKFold, y, 0)
    assert_raise_message(ValueError, error_string,
                         cval.StratifiedKFold, y, 1)
    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    """Every sample index must appear in exactly one test fold, both when
    the folds divide evenly (300 / 3) and when they do not (17 / 3)."""
    for n_samples, n_folds in [(300, 3), (17, 3)]:
        check_cv_coverage(cval.KFold(n_samples, n_folds),
                          expected_n_iter=n_folds, n_samples=n_samples)
def test_kfold_no_shuffle():
    """Unshuffled KFold must keep contiguous folds in the original data
    order, on toy datasets of both even and odd length."""
    # n_samples -> expected (train, test) pairs for KFold(n_samples, 2).
    expected = {4: [([2, 3], [0, 1]), ([0, 1], [2, 3])],
                5: [([3, 4], [0, 1, 2]), ([0, 1, 2], [3, 4])]}
    for n_samples, wanted in expected.items():
        observed = list(cval.KFold(n_samples, 2))
        for (train, test), (exp_train, exp_test) in zip(observed, wanted):
            assert_array_equal(train, exp_train)
            assert_array_equal(test, exp_test)
def test_stratified_kfold_no_shuffle():
    """Unshuffled StratifiedKFold should preserve data ordering per class."""
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])
    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])
    # Unbalanced classes: 3 samples of class 1, 4 samples of class 0.
    splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])
    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
    """StratifiedKFold must preserve per-class ratios in every split."""
    # Check that stratified kfold preserves label ratios in individual splits
    # Repeat with shuffling turned off and on
    n_samples = 1000
    # Class proportions: 10% of class 4, 89% of class 0, 1% of class 1.
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
                                2)
            assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
                                2)
            assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
                                2)
            assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
            assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
            assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
    """KFold fold sizes may differ by at most one and must sum to ``n``."""
    for n_samples in range(11, 17):
        kf = cval.KFold(n_samples, 5)
        sizes = [len(test) for _, test in kf]
        # Peak-to-peak spread of the fold sizes is at most one sample.
        assert_true(np.ptp(sizes) <= 1)
        assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    """StratifiedKFold fold sizes differ by at most one when feasible."""
    # Check that KFold returns folds with balanced sizes (only when
    # stratification is possible)
    # Repeat with shuffling turned off and on
    labels = [0] * 3 + [1] * 14
    for shuffle in [False, True]:
        for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
                    for i in range(11, 17)]:
            sizes = []
            for _, test in skf:
                sizes.append(len(test))
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
    """Shuffled KFold must actually permute indices yet still cover all."""
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)
    all_folds = None
    for train, test in kf:
        # Each test fold must differ from the contiguous thirds that an
        # unshuffled KFold would have produced.
        assert_true(np.any(np.arange(100) != ind[test]))
        assert_true(np.any(np.arange(100, 200) != ind[test]))
        assert_true(np.any(np.arange(200, 300) != ind[test]))
        if all_folds is None:
            all_folds = ind[test].copy()
        else:
            all_folds = np.concatenate((all_folds, ind[test]))
    all_folds.sort()
    # Concatenated (sorted) test folds must equal the full index range.
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    """Two different seeds must give different test folds, while each
    shuffled StratifiedKFold still covers every sample."""
    labels = [0] * 20 + [1] * 20
    splits_seed0 = list(cval.StratifiedKFold(labels, 5, shuffle=True,
                                             random_state=0))
    splits_seed1 = list(cval.StratifiedKFold(labels, 5, shuffle=True,
                                             random_state=1))
    for (_, test0), (_, test1) in zip(splits_seed0, splits_seed1):
        assert_true(set(test0) != set(test1))
    check_cv_coverage(splits_seed0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Shuffling hides sample dependencies; plain KFold exposes them."""
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact be computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non
    # shuffling variant (around 0.86).
    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)
    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_label_kfold():
    """LabelKFold: balanced folds, each label confined to a single fold."""
    rng = np.random.RandomState(0)
    # Parameters of the test
    n_labels = 15
    n_samples = 1000
    n_folds = 5
    # Construct the test data
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    labels = rng.randint(0, n_labels, n_samples)
    # ``idxs`` is the per-sample fold assignment computed by LabelKFold.
    folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
    ideal_n_labels_per_fold = n_samples // n_folds
    # Check that folds have approximately the same size
    assert_equal(len(folds), len(labels))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_labels_per_fold))
    # Check that each label appears only in 1 fold
    for label in np.unique(labels):
        assert_equal(len(np.unique(folds[labels == label])), 1)
    # Check that no label is on both sides of the split
    labels = np.asarray(labels, dtype=object)
    for train, test in cval.LabelKFold(labels, n_folds=n_folds):
        assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
    # Construct the test data
    labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
              'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
              'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
              'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
              'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
              'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
              'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
    labels = np.asarray(labels, dtype=object)
    n_labels = len(np.unique(labels))
    n_samples = len(labels)
    n_folds = 5
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
    ideal_n_labels_per_fold = n_samples // n_folds
    # Check that folds have approximately the same size
    assert_equal(len(folds), len(labels))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_labels_per_fold))
    # Check that each label appears only in 1 fold
    for label in np.unique(labels):
        assert_equal(len(np.unique(folds[labels == label])), 1)
    # Check that no label is on both sides of the split
    for train, test in cval.LabelKFold(labels, n_folds=n_folds):
        assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
    # Should fail if there are more folds than labels
    labels = np.array([1, 1, 1, 2, 2])
    assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
    """Float, int and numpy-int ``test_size`` specifications must all yield
    identical splits for the same ``random_state``."""
    as_fraction = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    as_int = cval.ShuffleSplit(10, test_size=2, random_state=0)
    as_np_int = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    # On Python 2 this also covers ``long``; the last type wins, matching
    # the original loop-and-overwrite behaviour.
    for typ in six.integer_types:
        as_builtin = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
    for t1, t2, t3, t4 in zip(as_fraction, as_int, as_np_int, as_builtin):
        for side in (0, 1):
            assert_array_equal(t1[side], t2[side])
            assert_array_equal(t2[side], t3[side])
            assert_array_equal(t3[side], t4[side])
def test_stratified_shuffle_split_init():
    """Constructor-time validation errors of StratifiedShuffleSplit."""
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
    # Check that error is raised if the test set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    """Splits keep class proportions, sizes, and train/test disjointness."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]
    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        test_size = np.ceil(0.33 * len(y))
        train_size = len(y) - test_size
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train],
                                   return_inverse=True)[1]) /
                       float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                  return_inverse=True)[1]) /
                      float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(len(train) + len(test), y.size)
            assert_equal(len(train), train_size)
            assert_equal(len(test), test_size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    """Indices must be drawn with approximately uniform (binomial) chance."""
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000
    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # NOTE: ``n_splits`` is a late-bound closure over the enclosing
        # function's local, assigned in the loop below before this is called.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")
    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)
        # The checks below use ``train``/``test`` from the last iteration.
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)
        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)
        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
    """Regression test: train and test indices must stay disjoint.

    See https://github.com/scikit-learn/scikit-learn/issues/6121 for
    the original bug report.
    """
    y = [0, 1, 2, 3] * 3 + [4, 5] * 5
    sss = cval.StratifiedShuffleSplit(y, n_iter=1,
                                      test_size=0.5, random_state=0)
    train, test = next(iter(sss))
    assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    """PredefinedSplit built from a KFold assignment must reproduce the
    exact same train/test partitions."""
    fold_ids = -1 * np.ones(10)
    kf_splits = []
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5,
                                                         shuffle=True)):
        kf_splits.append((train_ind, test_ind))
        # Record which fold each test sample belongs to.
        fold_ids[test_ind] = i
    ps_splits = list(cval.PredefinedSplit(fold_ids))
    assert_array_equal([train for train, _ in ps_splits],
                       [train for train, _ in kf_splits])
    assert_array_equal([test for _, test in ps_splits],
                       [test for _, test in kf_splits])
def test_label_shuffle_split():
    """LabelShuffleSplit keeps labels on one side only and sizes correct."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          ]
    for y in ys:
        n_iter = 6
        test_size = 1. / 3
        slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
                                     random_state=0)
        # Make sure the repr works
        repr(slo)
        # Test that the length is correct
        assert_equal(len(slo), n_iter)
        y_unique = np.unique(y)
        for train, test in slo:
            # First test: no train label is in the test set and vice versa
            y_train_unique = np.unique(y[train])
            y_test_unique = np.unique(y[test])
            assert_false(np.any(np.in1d(y[train], y_test_unique)))
            assert_false(np.any(np.in1d(y[test], y_train_unique)))
            # Second test: train and test add up to all the data
            assert_equal(y[train].size + y[test].size, y.size)
            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])
            # Fourth test: # unique train and test labels are correct,
            # +- 1 for rounding error
            assert_true(abs(len(y_test_unique) -
                            round(test_size * len(y_unique))) <= 1)
            assert_true(abs(len(y_train_unique) -
                            round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
    """CV objects must snapshot labels at construction time."""
    # Check that LeaveOneLabelOut and LeavePLabelOut work normally if
    # the labels variable is changed before calling __iter__
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    # Mutating the array after construction must not affect the splits.
    labels_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    """Smoke tests for cross_val_score on dense/sparse/list/nd inputs."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        # MockClassifier.score ignores the data, so every fold's score
        # equals the scalar clf.score(X, y) (broadcast comparison).
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())
    # An unknown scoring string must be rejected.
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    """cross_val_score must pass DataFrame/Series inputs through unchanged."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        # Real pandas is optional; the mock types are always exercised.
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """cross_val_score must accept boolean-mask folds and produce the same
    scores as the equivalent index-based folds."""
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=np.bool)
        mask_test = np.zeros(len(y), dtype=np.bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: the original appended ``(train, test)`` (the index
        # arrays), so the boolean-mask code path was never exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """Precomputed-kernel SVC must score like linear SVC, with validation."""
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    # A linear Gram matrix makes precomputed equivalent to kernel="linear".
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params (arrays, sparse, scalars, callables) must reach fit()."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    # Array-typed params are index-sliced per fold; the rest pass through.
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom scorer must be invoked once per fold."""
    clf = MockClassifier()
    _score_func_args = []
    def score_func(y_test, y_predict):
        # Record each invocation so the call count can be checked below.
        _score_func_args.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # Default 3-fold CV implies exactly three scorer calls.
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """cross_val_score must reject objects without a ``fit`` method."""
    class NotAnEstimator:
        pass
    assert_raises(TypeError, cval.cross_val_score, NotAnEstimator(), X)
def test_train_test_split_errors():
    """Invalid argument combinations for train_test_split must raise."""
    assert_raises(ValueError, cval.train_test_split)
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    # Fractions summing past 1.0, as float and as np.float32.
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    # Absolute sizes exceeding the number of samples.
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # Inputs of mismatched length.
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    """train_test_split: correspondence, list passthrough, nd, stratify."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    # (row i of X is [10*i, 10*i+1, ...], so column 0 equals 10 * y)
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    # Default test_size=0.25 on 10 samples -> 7 train / 3 test.
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    """train_test_split must preserve DataFrame input types.

    NOTE(review): this function lacks the ``test_`` prefix, so standard
    test collectors never run it — confirm whether that is intentional.
    """
    # check cross_val_score doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        # pandas is optional; the mock type is always exercised.
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
    """train_test_split must preserve the MockDataFrame type.

    NOTE(review): this function lacks the ``test_`` prefix, so standard
    test collectors never run it — confirm whether that is intentional.
    """
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
    """Default, accuracy and f1_weighted scorers agree on balanced iris."""
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    """Regression scorers: default R2, explicit r2, neg MSE, expl. var."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                          scoring="neg_mean_squared_error")
    expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: significant on real y, not on random y."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    # With uniform labels the permutation scheme is unchanged, so results
    # must match the unlabelled run above.
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    # With labels unrelated to X the score should be at chance level and
    # the permutation p-value should not be significant.
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    """All CV generators must yield integer index arrays, never boolean
    masks, and the yielded arrays must be usable for fancy indexing."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            # BUG FIX: the original asserted on ``train`` twice and never
            # checked ``test``.
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    """Same as test_cross_val_generator_with_indices, but relying on the
    generators' default (index-based) behaviour under ignored warnings."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            # BUG FIX: the original asserted on ``train`` twice and never
            # checked ``test``.
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    """Every invalid size specification must raise ValueError."""
    bad_params = [dict(test_size=2.0),
                  dict(test_size=1.0),
                  dict(test_size=0.1, train_size=0.95),
                  dict(test_size=11),
                  dict(test_size=10),
                  dict(test_size=8, train_size=3),
                  dict(train_size=1j),
                  dict(test_size=None, train_size=None)]
    for kwargs in bad_params:
        assert_raises(ValueError, cval.ShuffleSplit, 10, **kwargs)
def test_shufflesplit_reproducible():
    """A seeded ShuffleSplit must yield identical splits on re-iteration."""
    ss = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in ss]
    second_pass = [train for train, _ in ss]
    assert_array_equal(first_pass, second_pass)
def test_safe_split_with_precomputed_kernel():
    """_safe_split must slice Gram matrices on both axes consistently."""
    clf = SVC()
    clfp = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)
    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]
    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    # Train kernel block must equal the Gram matrix of the train rows.
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    # Test kernel block is test rows vs train columns.
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    """cross_val_score must not reject NaNs handled by an Imputer step."""
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    # Should simply not raise.
    cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
    """train_test_split must accept feature matrices containing NaNs."""
    features = np.arange(200, dtype=np.float64).reshape(10, 20)
    features[2] = np.nan
    targets = np.repeat([0, 1], features.shape[0] // 2)
    # Should simply not raise.
    cval.train_test_split(features, targets, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    """permutation_test_score must accept NaNs handled by an Imputer step."""
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    # Should simply not raise.
    cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
    """check_cv picks StratifiedKFold for single-output classification and
    plain KFold otherwise (regression, multilabel, multioutput)."""
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    X = np.ones((5, 2))
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    # Precision under the micro/macro/samples averaging schemes must all be
    # usable as cross-validation scorers on a multilabel target.
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    # Expected per-fold precisions (cv=5 on 10 samples -> 2 samples per fold).
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    # Default cv: still one prediction per sample.
    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    # Leave-one-out also yields one prediction per sample.
    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    # Sparse input: zero out the values below the median, convert to COO.
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))
    # Unsupervised estimator (no y) is also supported.
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    # A cv whose test folds do not form a partition of the samples
    # must be rejected with ValueError.
    def bad_cv():
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """cross_val_predict must accept dense, sparse, list and 3d inputs.

    Fix: the sparse-input/multioutput section was duplicated verbatim
    (same computation and comment run twice); the duplicate is removed.
    The lambdas assigned to names are replaced by local ``def``s (E731).
    """
    clf = Ridge()
    # Smoke test on the module-level dense data.
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # Multioutput y: predicting a 2-column target from sparse input.
    with ignore_warnings(category=ConvergenceWarning):
        predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # Sparse input with a single-output y.
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # X and y given as plain lists must reach the estimator as lists.
    def list_check(x):
        return isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())
    # A 3d X must be forwarded to the estimator unflattened.
    def check_3d(x):
        return x.ndim == 3
    X_3d = X[:, :, np.newaxis]
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        # pandas is optional; the MockDataFrame pair still runs.
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        # CheckingClassifier asserts the frame/series types survive the
        # cross-validation indexing machinery unchanged.
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """cross_val_score must forward sparse fit_params to the estimator."""
    iris = load_iris()
    data, target = iris.data, iris.target
    estimator = MockClassifier()
    weights = {'sparse_sample_weight': coo_matrix(np.eye(data.shape[0]))}
    scores = cval.cross_val_score(estimator, data, target, fit_params=weights)
    # MockClassifier scores 1.0 on every one of the 3 default folds.
    assert_array_equal(scores, np.ones(3))
def test_check_is_partition():
    """_check_is_partition accepts exactly the permutations of range(n)."""
    perm = np.arange(100)
    assert_true(cval._check_is_partition(perm, 100))
    # Dropping an element leaves a gap -> not a partition.
    assert_false(cval._check_is_partition(np.delete(perm, 23), 100))
    # Duplicating an element (23 now appears twice, 0 never) -> rejected.
    perm[0] = 23
    assert_false(cval._check_is_partition(perm, 100))
def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    # Sparse targets make the estimator emit sparse predictions; densify
    # them before comparing against the dense run.
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
The Pygments reStructuredText directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.5 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
class Pygments(Directive):
    """Source code syntax highlighting directive.

    Renders the directive content through the lexer named by the single
    required argument (falling back to plain text when the lexer name is
    unknown) and emits the result as a raw HTML node.  Directive options
    (see VARIANTS) select an alternate formatter.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = dict([(key, directives.flag) for key in VARIANTS])
    has_content = True

    def run(self):
        self.assert_has_content()
        try:
            lexer = get_lexer_by_name(self.arguments[0])
        except ValueError:
            # no lexer found - use the text one instead of an exception
            lexer = TextLexer()
        # Take an arbitrary option if more than one is given.
        # Fix: dict.keys() is not subscriptable on Python 3, so pick the
        # first key via next(iter(...)); the fragile ``a and b or c``
        # idiom is replaced by an explicit if/else.  Works on py2 too.
        if self.options:
            formatter = VARIANTS[next(iter(self.options))]
        else:
            formatter = DEFAULT
        parsed = highlight(u'\n'.join(self.content), lexer, formatter)
        return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_DTENSOR_MLIR_EXPANSIONS_SLICE_SPMD_EXPANDER_H_
#define TENSORFLOW_DTENSOR_MLIR_EXPANSIONS_SLICE_SPMD_EXPANDER_H_
#include "llvm/ADT/DenseMap.h"
#include "mlir/IR/Operation.h" // from @llvm-project
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
#include "tensorflow/dtensor/mlir/spmd_expander.h"
namespace tensorflow {
namespace dtensor {
// SPMD expander for slice ops: rewrites the op for per-device (sharded)
// execution and propagates layouts through it.  (Exact op set handled is
// registered in the matching .cc — confirm there.)
class SliceSPMDExpander : public SPMDExpanderBase {
 public:
  // Rewrites `op` into its SPMD-expanded form; returns the replacement op
  // or an error status.
  StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override;

  // Infers output layouts from the known input layouts.  The DenseMap is
  // presumably keyed by result/operand index — verify against the base class.
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& input_layouts) override;

  // Infers input layouts from the known output layouts (reverse direction).
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& output_layouts) override;
};
} // namespace dtensor
} // namespace tensorflow
#endif // TENSORFLOW_DTENSOR_MLIR_EXPANSIONS_SLICE_SPMD_EXPANDER_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/dtensor/mlir/expansions/slice_spmd_expander.h
|
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.gradle.plugin;
import org.gradle.api.Buildable;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.PublishArtifact;
import org.gradle.api.artifacts.PublishArtifactSet;
import org.gradle.api.artifacts.dsl.ArtifactHandler;
import org.gradle.api.tasks.TaskDependency;
import org.gradle.api.tasks.TaskProvider;
import org.gradle.api.tasks.bundling.Jar;
import org.jspecify.annotations.Nullable;
import org.springframework.boot.gradle.tasks.bundling.BootJar;
import org.springframework.boot.gradle.tasks.bundling.BootWar;
/**
* A wrapper for a {@link PublishArtifactSet} that ensures that only a single artifact is
* published, with a war file taking precedence over a jar file.
*
* @author Andy Wilkinson
* @author Scott Frederick
*/
final class SinglePublishedArtifact implements Buildable {

	private final Configuration configuration;

	private final ArtifactHandler handler;

	// The artifact currently registered on the configuration, if any.
	private @Nullable PublishArtifact currentArtifact;

	SinglePublishedArtifact(Configuration configuration, ArtifactHandler handler) {
		this.configuration = configuration;
		this.handler = handler;
	}

	// A war always wins: unconditionally replace whatever was registered.
	void addWarCandidate(TaskProvider<BootWar> candidate) {
		add(candidate);
	}

	// A jar is only published when no artifact (war or jar) came first.
	void addJarCandidate(TaskProvider<BootJar> candidate) {
		if (this.currentArtifact == null) {
			add(candidate);
		}
	}

	// Swap the previously registered artifact (remove tolerates null) for
	// the new one and remember it for future comparisons.
	private void add(TaskProvider<? extends Jar> artifact) {
		this.configuration.getArtifacts().remove(this.currentArtifact);
		this.currentArtifact = this.handler.add(this.configuration.getName(), artifact);
	}

	@Override
	public TaskDependency getBuildDependencies() {
		return this.configuration.getArtifacts().getBuildDependencies();
	}

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
build-plugin/spring-boot-gradle-plugin/src/main/java/org/springframework/boot/gradle/plugin/SinglePublishedArtifact.java
|
{
"html": {
"type": "Fragment",
"start": 0,
"end": 17,
"children": [
{
"type": "IfBlock",
"start": 0,
"end": 17,
"expression": {
"type": "Identifier",
"start": 5,
"end": 8,
"loc": {
"start": {
"line": 1,
"column": 5
},
"end": {
"line": 1,
"column": 8
}
},
"name": "foo"
},
"children": [
{
"type": "Text",
"start": 9,
"end": 12,
"raw": "bar",
"data": "bar"
}
]
}
]
}
}
|
json
|
github
|
https://github.com/sveltejs/svelte
|
packages/svelte/tests/parser-legacy/samples/if-block/output.json
|
# (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
WSGI Test Server
This builds upon paste.util.baseserver to customize it for regressions
where using raw_interactive won't do.
"""
import time
from paste.httpserver import *
class WSGIRegressionServer(WSGIServer):
    """
    A threaded WSGIServer for use in regression testing. To use this
    module, call serve(application, regression=True), and then call
    server.accept() to let it handle one request. When finished, use
    server.stop() to shutdown the server. Note that all pending requests
    are processed before the server shuts down.
    """
    # seconds of inactivity after which serve_pending gives up
    defaulttimeout = 10
    def __init__ (self, *args, **kwargs):
        WSGIServer.__init__(self, *args, **kwargs)
        # non-empty list acts as the "stop requested" flag; list.append
        # is atomic, so the tester's thread can flip it safely
        self.stopping = []
        # one entry per request the tester has authorized via accept()
        self.pending = []
        self.timeout = self.defaulttimeout
        # this is a local connection, be quick
        self.socket.settimeout(2)
    def serve_forever(self):
        # serve on a background thread so the tester's thread stays free
        from threading import Thread
        thread = Thread(target=self.serve_pending)
        thread.start()
    def reset_expires(self):
        # push the inactivity deadline forward by one timeout period
        if self.timeout:
            self.expires = time.time() + self.timeout
    def close_request(self, *args, **kwargs):
        # one authorized request has now been fully handled
        WSGIServer.close_request(self, *args, **kwargs)
        self.pending.pop()
        self.reset_expires()
    def serve_pending(self):
        # background loop: handle requests as the tester authorizes them,
        # draining any still-pending ones before honoring stop()
        self.reset_expires()
        while not self.stopping or self.pending:
            now = time.time()
            if now > self.expires and self.timeout:
                # note regression test doesn't handle exceptions in
                # threads very well; so we just print and exit
                print "\nWARNING: WSGIRegressionServer timeout exceeded\n"
                break
            if self.pending:
                self.handle_request()
            time.sleep(.1)
    def stop(self):
        """ stop the server (called from tester's thread) """
        self.stopping.append(True)
    def accept(self, count = 1):
        """ accept another request (called from tester's thread) """
        assert not self.stopping
        [self.pending.append(True) for x in range(count)]
def serve(application, host=None, port=None, handler=None):
    """ create, start (on a background thread), and return the server """
    server = WSGIRegressionServer(application, host, port, handler)
    print "serving on %s:%s" % server.server_address
    server.serve_forever()
    return server
if __name__ == '__main__':
    # smoke-test the server against paste's environ-dumping application
    import urllib
    from paste.wsgilib import dump_environ
    server = serve(dump_environ)
    baseuri = ("http://%s:%s" % server.server_address)
    def fetch(path):
        # tell the server to humor exactly one more request
        server.accept(1)
        # not needed; but this is what you do if the server
        # may not respond in a reasonable time period
        import socket
        socket.setdefaulttimeout(5)
        # build a uri, fetch and return
        return urllib.urlopen(baseuri + path).read()
    assert "PATH_INFO: /foo" in fetch("/foo")
    assert "PATH_INFO: /womble" in fetch("/womble")
    # ok, let's make one more final request...
    server.accept(1)
    # and then schedule a stop()
    server.stop()
    # and then... fetch it...
    urllib.urlopen(baseuri)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2002 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* Command Line Option Parsing Code
*
* $Id: jas_getopt.h,v 1.2 2008-05-26 09:41:51 vp153 Exp $
*/
#ifndef JAS_GETOPT_H
#define JAS_GETOPT_H
#ifdef __cplusplus
extern "C" {
#endif
#include <jasper/jas_config.h>
/******************************************************************************\
* Constants.
\******************************************************************************/
#define JAS_GETOPT_EOF (-1)
#define JAS_GETOPT_ERR '?'
/* option flags. */
#define JAS_OPT_HASARG 0x01 /* option has argument */
/******************************************************************************\
* Types.
\******************************************************************************/
/* Command line option type. */
/* Command line option descriptor; an array of these (typically terminated
   by an entry with id JAS_GETOPT_ERR or similar sentinel — confirm against
   jas_getopt.c) is passed to jas_getopt(). */
typedef struct {
	int id;
	/* The unique identifier for this option. */
	char *name;
	/* The name of this option. */
	int flags;
	/* Option flags: a bitmask of JAS_OPT_* values (e.g. JAS_OPT_HASARG
	   when the option takes an argument). */
} jas_opt_t;
/******************************************************************************\
* External data.
\******************************************************************************/
/* The current option index. */
extern int jas_optind;
/* The current option argument. */
extern char *jas_optarg;
/* The debug level. */
extern int jas_opterr;
/******************************************************************************\
* Prototypes.
\******************************************************************************/
/* Get the next option. */
int jas_getopt(int argc, char **argv, jas_opt_t *opts);
#ifdef __cplusplus
}
#endif
#endif
|
c
|
github
|
https://github.com/opencv/opencv
|
3rdparty/libjasper/jasper/jas_getopt.h
|
#!/usr/bin/env python
import mozprofile
import os
import shutil
import tempfile
import unittest
here = os.path.dirname(os.path.abspath(__file__))
class Bug758250(unittest.TestCase):
    """
    use of --profile in mozrunner just blows away addon sources:
    https://bugzilla.mozilla.org/show_bug.cgi?id=758250
    """

    def test_profile_addon_cleanup(self):
        """Profile cleanup must not delete the addon's source directory.

        Fixes over the original version:
        - tempfile.mktemp() is deprecated and race-prone; use a securely
          created mkdtemp() parent directory and copy the addon inside it
          (copytree still requires a non-existent destination).
        - the temp copy was only removed when every assertion passed;
          cleanup now runs in a ``finally`` block.
        """
        # sanity check: the empty addon should be here
        empty = os.path.join(here, 'addons', 'empty')
        self.assertTrue(os.path.exists(empty))
        self.assertTrue(os.path.isdir(empty))
        self.assertTrue(os.path.exists(os.path.join(empty, 'install.rdf')))
        # because we are testing data loss, let's make sure we make a copy
        parent = tempfile.mkdtemp()
        try:
            tmpdir = os.path.join(parent, 'empty')
            shutil.copytree(empty, tmpdir)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'install.rdf')))
            # make a starter profile
            profile = mozprofile.FirefoxProfile()
            path = profile.profile
            # make a new profile based on the old
            newprofile = mozprofile.FirefoxProfile(profile=path, addons=[tmpdir])
            newprofile.cleanup()
            # the source addon *should* still exist
            self.assertTrue(os.path.exists(tmpdir))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'install.rdf')))
        finally:
            # remove vestiges even if an assertion above failed
            shutil.rmtree(parent, ignore_errors=True)
if __name__ == '__main__':
unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Provides the constants needed for component."""
# All activity disabled / Device is off/standby
HVAC_MODE_OFF = "off"
# Heating
HVAC_MODE_HEAT = "heat"
# Cooling
HVAC_MODE_COOL = "cool"
# The device supports heating/cooling to a range
HVAC_MODE_HEAT_COOL = "heat_cool"
# The temperature is set based on a schedule, learned behavior, AI or some
# other related mechanism. User is not able to adjust the temperature
HVAC_MODE_AUTO = "auto"
# Device is in Dry/Humidity mode
HVAC_MODE_DRY = "dry"
# Only the fan is on, not fan and another mode like cool
HVAC_MODE_FAN_ONLY = "fan_only"
HVAC_MODES = [
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_AUTO,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
]
# No preset is active
PRESET_NONE = "none"
# Device is running an energy-saving mode
PRESET_ECO = "eco"
# Device is in away mode
PRESET_AWAY = "away"
# Device turns all valves fully up
PRESET_BOOST = "boost"
# Device is in comfort mode
PRESET_COMFORT = "comfort"
# Device is in home mode
PRESET_HOME = "home"
# Device is prepared for sleep
PRESET_SLEEP = "sleep"
# Device is reacting to activity (e.g. movement sensors)
PRESET_ACTIVITY = "activity"
# Possible fan states
FAN_ON = "on"
FAN_OFF = "off"
FAN_AUTO = "auto"
FAN_LOW = "low"
FAN_MEDIUM = "medium"
FAN_HIGH = "high"
FAN_MIDDLE = "middle"
FAN_FOCUS = "focus"
FAN_DIFFUSE = "diffuse"
# Possible swing state
SWING_OFF = "off"
SWING_BOTH = "both"
SWING_VERTICAL = "vertical"
SWING_HORIZONTAL = "horizontal"
# These are the supported current states of HVAC
CURRENT_HVAC_OFF = "off"
CURRENT_HVAC_HEAT = "heating"
CURRENT_HVAC_COOL = "cooling"
CURRENT_HVAC_DRY = "drying"
CURRENT_HVAC_IDLE = "idle"
CURRENT_HVAC_FAN = "fan"
# A list of possible HVAC actions.
CURRENT_HVAC_ACTIONS = [
CURRENT_HVAC_OFF,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_FAN,
]
ATTR_AUX_HEAT = "aux_heat"
ATTR_CURRENT_HUMIDITY = "current_humidity"
ATTR_CURRENT_TEMPERATURE = "current_temperature"
ATTR_FAN_MODES = "fan_modes"
ATTR_FAN_MODE = "fan_mode"
ATTR_PRESET_MODE = "preset_mode"
ATTR_PRESET_MODES = "preset_modes"
ATTR_HUMIDITY = "humidity"
ATTR_MAX_HUMIDITY = "max_humidity"
ATTR_MIN_HUMIDITY = "min_humidity"
ATTR_MAX_TEMP = "max_temp"
ATTR_MIN_TEMP = "min_temp"
ATTR_HVAC_ACTION = "hvac_action"
ATTR_HVAC_MODES = "hvac_modes"
ATTR_HVAC_MODE = "hvac_mode"
ATTR_SWING_MODES = "swing_modes"
ATTR_SWING_MODE = "swing_mode"
ATTR_TARGET_TEMP_HIGH = "target_temp_high"
ATTR_TARGET_TEMP_LOW = "target_temp_low"
ATTR_TARGET_TEMP_STEP = "target_temp_step"
DEFAULT_MIN_TEMP = 7
DEFAULT_MAX_TEMP = 35
DEFAULT_MIN_HUMIDITY = 30
DEFAULT_MAX_HUMIDITY = 99
DOMAIN = "climate"
SERVICE_SET_AUX_HEAT = "set_aux_heat"
SERVICE_SET_FAN_MODE = "set_fan_mode"
SERVICE_SET_PRESET_MODE = "set_preset_mode"
SERVICE_SET_HUMIDITY = "set_humidity"
SERVICE_SET_HVAC_MODE = "set_hvac_mode"
SERVICE_SET_SWING_MODE = "set_swing_mode"
SERVICE_SET_TEMPERATURE = "set_temperature"
SUPPORT_TARGET_TEMPERATURE = 1
SUPPORT_TARGET_TEMPERATURE_RANGE = 2
SUPPORT_TARGET_HUMIDITY = 4
SUPPORT_FAN_MODE = 8
SUPPORT_PRESET_MODE = 16
SUPPORT_SWING_MODE = 32
SUPPORT_AUX_HEAT = 64
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for Oppia feedback threads and messages."""
from core.platform import models
import feconf
import utils
from google.appengine.ext import ndb
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
STATUS_CHOICES_OPEN = 'open'
STATUS_CHOICES_FIXED = 'fixed'
STATUS_CHOICES_IGNORED = 'ignored'
STATUS_CHOICES_COMPLIMENT = 'compliment'
STATUS_CHOICES_NOT_ACTIONABLE = 'not_actionable'
STATUS_CHOICES = [
STATUS_CHOICES_OPEN,
STATUS_CHOICES_FIXED,
STATUS_CHOICES_IGNORED,
STATUS_CHOICES_COMPLIMENT,
STATUS_CHOICES_NOT_ACTIONABLE,
]
# Constants used for generating new ids.
_MAX_RETRIES = 10
_RAND_RANGE = 127 * 127
class FeedbackThreadModel(base_models.BaseModel):
    """Threads for each exploration.

    The id/key of instances of this class has the form
    [EXPLORATION_ID].[THREAD_ID]
    """
    # ID of the exploration the thread is about.
    exploration_id = ndb.StringProperty(required=True, indexed=True)
    # ID of state the thread is for. Does not exist if the thread is about the
    # entire exploration.
    state_name = ndb.StringProperty(indexed=True)
    # ID of the user who started the thread. This may be None if the feedback
    # was given anonymously by a learner.
    original_author_id = ndb.StringProperty(indexed=True)
    # Latest status of the thread.
    status = ndb.StringProperty(
        default=STATUS_CHOICES_OPEN,
        choices=STATUS_CHOICES,
        required=True,
        indexed=True,
    )
    # Latest subject of the thread.
    subject = ndb.StringProperty(indexed=False)
    # Summary text of the thread.
    summary = ndb.TextProperty(indexed=False)

    @classmethod
    def generate_new_thread_id(cls, exploration_id):
        """Generates a new thread id, unique within the exploration.

        Exploration ID + the generated thread ID is globally unique.

        Raises:
            Exception: if _MAX_RETRIES consecutive candidates all collide.
        """
        for _ in range(_MAX_RETRIES):
            # Candidate combines a timestamp with a random component so ids
            # are roughly time-ordered yet collision-resistant.
            thread_id = (
                utils.base64_from_int(utils.get_current_time_in_millisecs()) +
                utils.base64_from_int(utils.get_random_int(_RAND_RANGE)))
            if not cls.get_by_exp_and_thread_id(exploration_id, thread_id):
                return thread_id
        raise Exception(
            'New thread id generator is producing too many collisions.')

    @classmethod
    def _generate_id(cls, exploration_id, thread_id):
        # Full entity id: '<exploration_id>.<thread_id>'.
        return '.'.join([exploration_id, thread_id])

    @classmethod
    def create(cls, exploration_id, thread_id):
        """Creates a new FeedbackThreadModel entry.

        Throws an exception if a thread with the given exploration ID and
        thread ID combination exists already.
        """
        instance_id = cls._generate_id(exploration_id, thread_id)
        if cls.get_by_id(instance_id):
            raise Exception('Feedback thread ID conflict on create.')
        # NOTE: the instance is returned unsaved; the caller must put() it.
        return cls(id=instance_id)

    @classmethod
    def get_by_exp_and_thread_id(cls, exploration_id, thread_id):
        """Gets the FeedbackThreadModel entry for the given ID.

        Returns None if the thread is not found or is already deleted.
        """
        return cls.get_by_id(cls._generate_id(exploration_id, thread_id))

    @classmethod
    def get_threads(cls, exploration_id):
        """Returns an array of threads associated to the exploration.

        Does not include the deleted entries. At most
        feconf.DEFAULT_QUERY_LIMIT threads are returned.
        """
        return cls.get_all().filter(
            cls.exploration_id == exploration_id).fetch(
                feconf.DEFAULT_QUERY_LIMIT)
class FeedbackMessageModel(base_models.BaseModel):
    """Feedback messages. One or more of these messages make a thread.

    The id/key of instances of this class has the form
    [EXPLORATION_ID].[THREAD_ID].[MESSAGE_ID]
    """
    # ID corresponding to an entry of FeedbackThreadModel in the form of
    # [EXPLORATION_ID].[THREAD_ID]
    thread_id = ndb.StringProperty(required=True, indexed=True)
    # 0-based sequential numerical ID. Sorting by this field will create the
    # thread in chronological order.
    message_id = ndb.IntegerProperty(required=True, indexed=True)
    # ID of the user who posted this message. This may be None if the feedback
    # was given anonymously by a learner.
    author_id = ndb.StringProperty(indexed=True)
    # New thread status. Must exist in the first message of a thread. For the
    # rest of the thread, should exist only when the status changes.
    updated_status = ndb.StringProperty(choices=STATUS_CHOICES, indexed=True)
    # New thread subject. Must exist in the first message of a thread. For the
    # rest of the thread, should exist only when the subject changes.
    updated_subject = ndb.StringProperty(indexed=False)
    # Message text. Allowed not to exist (e.g. post only to update the status).
    text = ndb.StringProperty(indexed=False)

    @classmethod
    def _generate_id(cls, thread_id, message_id):
        # Full entity id: '<thread_id>.<message_id>', where thread_id is
        # itself dotted ('<exploration_id>.<thread_id>').
        return '.'.join([thread_id, str(message_id)])

    @property
    def exploration_id(self):
        # The exploration id is the first dotted component of the entity id.
        return self.id.split('.')[0]

    def get_thread_subject(self):
        # The subject lives on the parent thread, not on individual messages.
        return FeedbackThreadModel.get_by_id(self.thread_id).subject

    @classmethod
    def create(cls, thread_id, message_id):
        """Creates a new FeedbackMessageModel entry.

        Throws an exception if a message with the given thread ID and message
        ID combination exists already.
        """
        instance_id = cls._generate_id(thread_id, message_id)
        if cls.get_by_id(instance_id):
            raise Exception('Feedback message ID conflict on create.')
        # NOTE: the instance is returned unsaved; the caller must put() it.
        return cls(id=instance_id)

    @classmethod
    def get(cls, thread_id, message_id, strict=True):
        """Gets the FeedbackMessageModel entry for the given ID.

        If the message id is valid and it is not marked as deleted, returns the
        message instance. Otherwise:
        - if strict is True, raises EntityNotFoundError
        - if strict is False, returns None.
        """
        instance_id = cls._generate_id(thread_id, message_id)
        return super(FeedbackMessageModel, cls).get(instance_id, strict=strict)

    @classmethod
    def get_messages(cls, thread_id):
        """Returns an array of messages in the thread.

        Does not include the deleted entries. At most
        feconf.DEFAULT_QUERY_LIMIT messages are returned.
        """
        return cls.get_all().filter(
            cls.thread_id == thread_id).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_most_recent_message(cls, thread_id):
        # Orders by the inherited last_updated field (descending) and takes
        # the first hit.
        return cls.get_all().filter(
            cls.thread_id == thread_id).order(-cls.last_updated).get()

    @classmethod
    def get_message_count(cls, thread_id):
        """Returns the number of messages in the thread.

        Includes the deleted entries.
        """
        return cls.get_all(include_deleted_entities=True).filter(
            cls.thread_id == thread_id).count()

    @classmethod
    def get_all_messages(cls, page_size, urlsafe_start_cursor):
        # Paginated fetch of every message; sorting semantics come from the
        # base-model helper (_fetch_page_sorted_by_last_updated).
        return cls._fetch_page_sorted_by_last_updated(
            cls.query(), page_size, urlsafe_start_cursor)
class FeedbackAnalyticsModel(base_models.BaseMapReduceBatchResultsModel):
    """Model for storing feedback thread analytics for an exploration.

    The key of each instance is the exploration id.
    """
    # The number of open feedback threads filed against this exploration.
    num_open_threads = ndb.IntegerProperty(default=None, indexed=True)
    # Total number of feedback threads filed against this exploration.
    num_total_threads = ndb.IntegerProperty(default=None, indexed=True)

    @classmethod
    def create(cls, model_id, num_open_threads, num_total_threads):
        """Creates a new FeedbackAnalyticsModel entry.

        Args:
            model_id: the exploration id this analytics row describes.
            num_open_threads: number of currently-open threads.
            num_total_threads: total number of threads ever filed.
        """
        # Unlike the create() methods of the other models in this file,
        # this one put()s the entity immediately (overwriting any existing
        # row with the same id) instead of returning it unsaved.
        cls(
            id=model_id,
            num_open_threads=num_open_threads,
            num_total_threads=num_total_threads
        ).put()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Test data file to be stored within a zip file.
FAVORITE_NUMBER = 5
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/archivetestdata/testdata_module_inside_zip.py
|
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
from magnum.api import attr_validator
from magnum.api.controllers import base
from magnum.api.controllers import link
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api import expose
from magnum.api import utils as api_utils
from magnum.api.validation import validate_bay_properties
from magnum.common import exception
from magnum.common import policy
from magnum import objects
from magnum.objects import fields
class BayPatchType(types.JsonPatchType):
    """JSON-patch rules for Bay resources.

    Declares which attribute paths a patch must keep and which are
    server-managed and therefore not patchable by API clients.
    """

    @staticmethod
    def mandatory_attrs():
        """Paths that a JSON patch is not allowed to remove."""
        return ['/baymodel_id']

    @staticmethod
    def internal_attrs():
        """Server-managed paths, appended to the base class's list."""
        bay_internal = [
            '/api_address',
            '/node_addresses',
            '/master_addresses',
            '/stack_id',
            '/ca_cert_ref',
            '/magnum_cert_ref',
            '/trust_id',
            '/trustee_user_name',
            '/trustee_password',
            '/trustee_user_id',
        ]
        return types.JsonPatchType.internal_attrs() + bay_internal
class Bay(base.APIBase):
    """API representation of a bay.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a bay.
    """

    # Backing store for the baymodel_id wsproperty below.
    _baymodel_id = None

    def _get_baymodel_id(self):
        """Getter for the baymodel_id wsproperty."""
        return self._baymodel_id

    def _set_baymodel_id(self, value):
        """Setter for baymodel_id: resolves the value to a BayModel UUID."""
        if value and self._baymodel_id != value:
            try:
                baymodel = api_utils.get_resource('BayModel', value)
                self._baymodel_id = baymodel.uuid
            except exception.BayModelNotFound as e:
                # Change error code because 404 (NotFound) is inappropriate
                # response for a POST request to create a Bay
                e.code = 400  # BadRequest
                raise e
        elif value == wtypes.Unset:
            self._baymodel_id = wtypes.Unset

    uuid = types.uuid
    """Unique UUID for this bay"""

    name = wtypes.StringType(min_length=1, max_length=255)
    """Name of this bay"""

    baymodel_id = wsme.wsproperty(wtypes.text, _get_baymodel_id,
                                  _set_baymodel_id, mandatory=True)
    """The baymodel UUID"""

    node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
    """The node count for this bay. Default to 1 if not set"""

    master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
    """The number of master nodes for this bay. Default to 1 if not set"""

    bay_create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=0)
    """Timeout for creating the bay in minutes. Default to 0 if not set"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated bay links"""

    stack_id = wsme.wsattr(wtypes.text, readonly=True)
    """Stack id of the heat stack"""

    status = wtypes.Enum(str, *fields.BayStatus.ALL)
    """Status of the bay from the heat stack"""

    status_reason = wtypes.text
    """Status reason of the bay from the heat stack"""

    discovery_url = wtypes.text
    """Url used for bay node discovery"""

    api_address = wsme.wsattr(wtypes.text, readonly=True)
    """Api address of cluster master node"""

    node_addresses = wsme.wsattr([wtypes.text], readonly=True)
    """IP addresses of cluster slave nodes"""

    master_addresses = wsme.wsattr([wtypes.text], readonly=True)
    """IP addresses of cluster master nodes"""

    def __init__(self, **kwargs):
        """Populate only the fields the API exposes from **kwargs."""
        super(Bay, self).__init__()

        self.fields = []
        for field in objects.Bay.fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))

    @staticmethod
    def _convert_with_links(bay, url, expand=True):
        """Attach self/bookmark links; trim to summary fields when not expanding."""
        if not expand:
            bay.unset_fields_except(['uuid', 'name', 'baymodel_id',
                                     'node_count', 'status',
                                     'bay_create_timeout', 'master_count',
                                     'stack_id'])

        bay.links = [link.Link.make_link('self', url,
                                         'bays', bay.uuid),
                     link.Link.make_link('bookmark', url,
                                         'bays', bay.uuid,
                                         bookmark=True)]
        return bay

    @classmethod
    def convert_with_links(cls, rpc_bay, expand=True):
        """Build an API Bay from an RPC bay object and add links."""
        bay = Bay(**rpc_bay.as_dict())
        return cls._convert_with_links(bay, pecan.request.host_url, expand)

    @classmethod
    def sample(cls, expand=True):
        """Return a sample Bay, used for generated API documentation."""
        sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
                     name='example',
                     baymodel_id='4a96ac4b-2447-43f1-8ca6-9fd6f36d146d',
                     node_count=2,
                     master_count=1,
                     bay_create_timeout=15,
                     stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63',
                     status=fields.BayStatus.CREATE_COMPLETE,
                     status_reason="CREATE completed successfully",
                     api_address='172.24.4.3',
                     node_addresses=['172.24.4.4', '172.24.4.5'],
                     created_at=timeutils.utcnow(),
                     updated_at=timeutils.utcnow())
        return cls._convert_with_links(sample, 'http://localhost:9511', expand)
class BayCollection(collection.Collection):
    """API representation of a collection of bays."""

    bays = [Bay]
    """A list containing bays objects"""

    def __init__(self, **kwargs):
        # NOTE(review): super().__init__ is not called here; presumably
        # collection.Collection tolerates that -- confirm upstream.
        self._type = 'bays'

    @staticmethod
    def convert_with_links(rpc_bays, limit, url=None, expand=False, **kwargs):
        """Build a BayCollection (with a next-page link) from RPC bays."""
        collection = BayCollection()
        collection.bays = [Bay.convert_with_links(p, expand)
                           for p in rpc_bays]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection

    @classmethod
    def sample(cls):
        """Return a sample collection for generated API documentation."""
        sample = cls()
        sample.bays = [Bay.sample(expand=False)]
        return sample
class BaysController(rest.RestController):
    """REST controller for Bays."""

    def __init__(self):
        super(BaysController, self).__init__()

    # Extra non-CRUD actions exposed by pecan on this controller.
    _custom_actions = {
        'detail': ['GET'],
    }

    def _get_bays_collection(self, marker, limit,
                             sort_key, sort_dir, expand=False,
                             resource_url=None):
        """Fetch one page of bays and wrap it in a BayCollection."""
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        # The pagination marker is a bay UUID; resolve it to an object.
        marker_obj = None
        if marker:
            marker_obj = objects.Bay.get_by_uuid(pecan.request.context,
                                                 marker)

        bays = objects.Bay.list(pecan.request.context, limit,
                                marker_obj, sort_key=sort_key,
                                sort_dir=sort_dir)

        return BayCollection.convert_with_links(bays, limit,
                                                url=resource_url,
                                                expand=expand,
                                                sort_key=sort_key,
                                                sort_dir=sort_dir)

    @expose.expose(BayCollection, types.uuid, int, wtypes.text,
                   wtypes.text)
    def get_all(self, marker=None, limit=None, sort_key='id',
                sort_dir='asc'):
        """Retrieve a list of bays.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'bay:get_all',
                       action='bay:get_all')
        return self._get_bays_collection(marker, limit, sort_key,
                                         sort_dir)

    @expose.expose(BayCollection, types.uuid, int, wtypes.text,
                   wtypes.text)
    def detail(self, marker=None, limit=None, sort_key='id',
               sort_dir='asc'):
        """Retrieve a list of bays with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'bay:detail',
                       action='bay:detail')

        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "bays":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['bays', 'detail'])
        return self._get_bays_collection(marker, limit,
                                         sort_key, sort_dir, expand,
                                         resource_url)

    @expose.expose(Bay, types.uuid_or_name)
    def get_one(self, bay_ident):
        """Retrieve information about the given bay.

        :param bay_ident: UUID of a bay or logical name of the bay.
        """
        context = pecan.request.context
        bay = api_utils.get_resource('Bay', bay_ident)
        policy.enforce(context, 'bay:get', bay,
                       action='bay:get')

        return Bay.convert_with_links(bay)

    @expose.expose(Bay, body=Bay, status_code=201)
    def post(self, bay):
        """Create a new bay.

        :param bay: a bay within the request body.
        """
        context = pecan.request.context
        policy.enforce(context, 'bay:create',
                       action='bay:create')
        baymodel = objects.BayModel.get_by_uuid(context, bay.baymodel_id)
        attr_validator.validate_os_resources(context, baymodel.as_dict())
        bay_dict = bay.as_dict()
        bay_dict['project_id'] = context.project_id
        bay_dict['user_id'] = context.user_id
        if bay_dict.get('name') is None:
            # NOTE(review): this assignment is a no-op (name is already
            # None); presumably a generated default name was intended here
            # -- confirm against upstream history.
            bay_dict['name'] = None
        new_bay = objects.Bay(context, **bay_dict)

        # Bay creation is asynchronous; the conductor drives the heat stack.
        res_bay = pecan.request.rpcapi.bay_create(new_bay,
                                                  bay.bay_create_timeout)

        # Set the HTTP Location Header
        pecan.response.location = link.build_url('bays', res_bay.uuid)
        return Bay.convert_with_links(res_bay)

    @wsme.validate(types.uuid, [BayPatchType])
    @expose.expose(Bay, types.uuid_or_name, body=[BayPatchType])
    def patch(self, bay_ident, patch):
        """Update an existing bay.

        :param bay_ident: UUID or logical name of a bay.
        :param patch: a json PATCH document to apply to this bay.
        """
        context = pecan.request.context
        bay = api_utils.get_resource('Bay', bay_ident)
        policy.enforce(context, 'bay:update', bay,
                       action='bay:update')
        try:
            bay_dict = bay.as_dict()
            new_bay = Bay(**api_utils.apply_jsonpatch(bay_dict, patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.Bay.fields:
            try:
                patch_val = getattr(new_bay, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if bay[field] != patch_val:
                bay[field] = patch_val

        delta = bay.obj_what_changed()

        validate_bay_properties(delta)

        res_bay = pecan.request.rpcapi.bay_update(bay)
        return Bay.convert_with_links(res_bay)

    @expose.expose(None, types.uuid_or_name, status_code=204)
    def delete(self, bay_ident):
        """Delete a bay.

        :param bay_ident: UUID of a bay or logical name of the bay.
        """
        context = pecan.request.context
        bay = api_utils.get_resource('Bay', bay_ident)
        policy.enforce(context, 'bay:delete', bay,
                       action='bay:delete')

        pecan.request.rpcapi.bay_delete(bay.uuid)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#*
#* GRUB -- GRand Unified Bootloader
#* Copyright (C) 2010 Free Software Foundation, Inc.
#*
#* GRUB is free software: you can redistribute it and/or modify
#* it under the terms of the GNU General Public License as published by
#* the Free Software Foundation, either version 3 of the License, or
#* (at your option) any later version.
#*
#* GRUB is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
#*
# Generates a C source file of compact Unicode property tables for GRUB:
# contiguous codepoint ranges sharing bidi type / combining class /
# mirroring / joining, a bidi mirroring-pair table, and an Arabic
# shaping (presentation-forms) table.
import re
import sys

# NOTE(review): only 2 arguments are validated, but argv[3] (joining data,
# i.e. ArabicShaping.txt) and argv[4] (output file) are also read below, so
# 4 arguments are actually required and the usage string is misleading --
# confirm the intended invocation.
if len (sys.argv) < 3:
    print ("Usage: %s SOURCE DESTINATION" % sys.argv[0])
    exit (0)
# Pass 1: read the joining-type file (argv[3]) into a codepoint -> type map.
infile = open (sys.argv[3], "r")

joining = {}
for line in infile:
    # Strip comments, newlines and spaces.
    line = re.sub ("#.*$", "", line)
    line = line.replace ("\n", "")
    line = line.replace (" ", "")
    # NOTE(review): the '\n' comparison is dead after the replace above;
    # only the empty-line check can fire.
    if len (line) == 0 or line[0] == '\n':
        continue
    # Fields: codepoint ; name ; joining type ; joining group.
    sp = line.split (";")
    curcode = int (sp[0], 16)
    if sp[2] == "U":
        joining[curcode] = "NONJOINING"
    elif sp[2] == "L":
        joining[curcode] = "LEFT"
    elif sp[2] == "R":
        joining[curcode] = "RIGHT"
    elif sp[2] == "D":
        joining[curcode] = "DUAL"
    elif sp[2] == "C":
        joining[curcode] = "CAUSING"
    else:
        print ("Unknown joining type '%s'" % sp[2])
        exit (1)
infile.close ()

# Pass 2: read UnicodeData.txt (argv[1]) and emit compact ranges to argv[4].
infile = open (sys.argv[1], "r")
outfile = open (sys.argv[4], "w")

outfile.write ("#include <grub/unicode.h>\n")
outfile.write ("\n")
outfile.write ("struct grub_unicode_compact_range grub_unicode_compact[] = {\n")

# Sentinels: -2 guarantees the very first codepoint opens a new range.
begincode = -2
lastcode = -2
lastbiditype = "X"
lastmirrortype = False
lastcombtype = -1
arabicsubst = {}
for line in infile:
    # UnicodeData.txt fields used: 0=codepoint, 1=name, 2=general category,
    # 3=canonical combining class, 4=bidi category, 9=bidi mirrored flag.
    sp = line.split (";")
    curcode = int (sp[0], 16)
    curcombtype = int (sp[3], 10)
    curbiditype = sp[4]
    curmirrortype = (sp[9] == "Y")
    # Combining classes 253-255 are repurposed below for Me/Mc/Mn marks,
    # so a real class in that range would collide.
    if curcombtype <= 255 and curcombtype >= 253:
        print ("UnicodeData.txt uses combination type %d. Conflict." \
               % curcombtype)
        raise
    if sp[2] != "Lu" and sp[2] != "Ll" and sp[2] != "Lt" and sp[2] != "Lm" \
       and sp[2] != "Lo"\
       and sp[2] != "Me" and sp[2] != "Mc" and sp[2] != "Mn" \
       and sp[2] != "Nd" and sp[2] != "Nl" and sp[2] != "No" \
       and sp[2] != "Pc" and sp[2] != "Pd" and sp[2] != "Ps" \
       and sp[2] != "Pe" and sp[2] != "Pi" and sp[2] != "Pf" \
       and sp[2] != "Po" \
       and sp[2] != "Sm" and sp[2] != "Sc" and sp[2] != "Sk" \
       and sp[2] != "So"\
       and sp[2] != "Zs" and sp[2] != "Zl" and sp[2] != "Zp" \
       and sp[2] != "Cc" and sp[2] != "Cf" and sp[2] != "Cs" \
       and sp[2] != "Co":
        print ("WARNING: Unknown type %s" % sp[2])
    # Encode mark categories into the private combining classes 253-255.
    if curcombtype == 0 and sp[2] == "Me":
        curcombtype = 253
    if curcombtype == 0 and sp[2] == "Mc":
        curcombtype = 254
    if curcombtype == 0 and sp[2] == "Mn":
        curcombtype = 255
    # Warn about combining classes outside the known whitelist.
    if (curcombtype >= 2 and curcombtype <= 6) \
       or (curcombtype >= 37 and curcombtype != 84 and curcombtype != 91 and curcombtype != 103 and curcombtype != 107 and curcombtype != 118 and curcombtype != 122 and curcombtype != 129 and curcombtype != 130 and curcombtype != 132 and curcombtype != 202 and \
           curcombtype != 214 and curcombtype != 216 and \
           curcombtype != 218 and curcombtype != 220 and \
           curcombtype != 222 and curcombtype != 224 and curcombtype != 226 and curcombtype != 228 and \
           curcombtype != 230 and curcombtype != 232 and curcombtype != 233 and \
           curcombtype != 234 and \
           curcombtype != 240 and curcombtype != 253 and \
           curcombtype != 254 and curcombtype != 255):
        print ("WARNING: Unknown combining type %d" % curcombtype)
    # Joining type: explicit entry, else transparent for marks/format chars.
    if curcode in joining:
        curjoin = joining[curcode]
    elif sp[2] == "Me" or sp[2] == "Mn" or sp[2] == "Cf":
        curjoin = "TRANSPARENT"
    else:
        curjoin = "NONJOINING"
    # Collect isolated/final/medial/initial presentation forms per Arabic
    # letter for the shaping table emitted at the end of the script.
    if sp[1].startswith ("ARABIC LETTER "):
        arabname = sp[1][len ("ARABIC LETTER "):]
        form = 0
        if arabname.endswith (" ISOLATED FORM"):
            arabname = arabname[0:len (arabname) - len (" ISOLATED FORM")]
            form = 1
        if arabname.endswith (" FINAL FORM"):
            arabname = arabname[0:len (arabname) - len (" FINAL FORM")]
            form = 2
        if arabname.endswith (" MEDIAL FORM"):
            arabname = arabname[0:len (arabname) - len (" MEDIAL FORM")]
            form = 3
        if arabname.endswith (" INITIAL FORM"):
            arabname = arabname[0:len (arabname) - len (" INITIAL FORM")]
            form = 4
        if arabname not in arabicsubst:
            arabicsubst[arabname]={}
        arabicsubst[arabname][form] = curcode;
        if form == 0:
            # The base (isolated-name) entry records the joining type.
            arabicsubst[arabname]['join'] = curjoin
    # Close the current range whenever contiguity or any property breaks.
    # Ranges that are plain left-to-right, class 0, unmirrored are omitted
    # (they are the default at lookup time).
    if lastcode + 1 != curcode or curbiditype != lastbiditype \
       or curcombtype != lastcombtype or curmirrortype != lastmirrortype \
       or curjoin != lastjoin:
        if begincode != -2 and (lastbiditype != "L" or lastcombtype != 0 or \
                                lastmirrortype):
            outfile.write (("{0x%x, 0x%x, GRUB_BIDI_TYPE_%s, %d, %d, GRUB_JOIN_TYPE_%s},\n" \
                            % (begincode, lastcode, lastbiditype, \
                               lastcombtype, lastmirrortype, \
                               lastjoin)))
        begincode = curcode
    lastcode = curcode
    lastjoin = curjoin
    lastbiditype = curbiditype
    lastcombtype = curcombtype
    lastmirrortype = curmirrortype
# Flush the final pending range, then a zero terminator entry.
if lastbiditype != "L" or lastcombtype != 0 or lastmirrortype:
    outfile.write (("{0x%x, 0x%x, GRUB_BIDI_TYPE_%s, %d, %d, GRUB_JOIN_TYPE_%s},\n" \
                    % (begincode, lastcode, lastbiditype, lastcombtype, \
                       lastmirrortype, lastjoin)))
outfile.write ("{0, 0, 0, 0, 0, 0},\n")
outfile.write ("};\n")
infile.close ()

# Pass 3: bidi mirroring pairs from argv[2] (pairs of codepoints).
infile = open (sys.argv[2], "r")

outfile.write ("struct grub_unicode_bidi_pair grub_unicode_bidi_pairs[] = {\n")
for line in infile:
    line = re.sub ("#.*$", "", line)
    line = line.replace ("\n", "")
    line = line.replace (" ", "")
    if len (line) == 0 or line[0] == '\n':
        continue
    sp = line.split (";")
    code1 = int (sp[0], 16)
    code2 = int (sp[1], 16)
    outfile.write ("{0x%x, 0x%x},\n" % (code1, code2))
outfile.write ("{0, 0},\n")
outfile.write ("};\n")
infile.close ()

# Emit the Arabic shaping table collected in pass 2. Letters missing a
# required form are silently skipped via the bare except (KeyError).
outfile.write ("struct grub_unicode_arabic_shape grub_unicode_arabic_shapes[] = {\n ")
for x in arabicsubst:
    try:
        if arabicsubst[x]['join'] == "DUAL":
            outfile.write ("{0x%x, 0x%x, 0x%x, 0x%x, 0x%x},\n " % (arabicsubst[x][0], arabicsubst[x][1], arabicsubst[x][2], arabicsubst[x][3], arabicsubst[x][4]))
        elif arabicsubst[x]['join'] == "RIGHT":
            outfile.write ("{0x%x, 0x%x, 0x%x, 0x%x, 0x%x},\n " % (arabicsubst[x][0], arabicsubst[x][1], arabicsubst[x][2], 0, 0))
        elif arabicsubst[x]['join'] == "LEFT":
            outfile.write ("{0x%x, 0x%x, 0x%x, 0x%x, 0x%x},\n " % (arabicsubst[x][0], arabicsubst[x][1], 0, 0, arabicsubst[x][4]))
    except:
        pass

outfile.write ("{0, 0, 0, 0, 0},\n")
outfile.write ("};\n")

outfile.close ()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Tests for the raise statement."""
from test import support
import sys
import types
import unittest
def get_tb():
    """Return a freshly created traceback object.

    Raises and immediately catches an OSError so the returned traceback
    has exactly one frame (this function's own).
    """
    tb = None
    try:
        raise OSError()
    except OSError as exc:
        tb = exc.__traceback__
    return tb
class Context:
    """Context manager that swallows any exception raised in its body."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # A truthy return value tells the interpreter to suppress the
        # in-flight exception, if there is one.
        return True
class TestRaise(unittest.TestCase):
    """Tests for plain ``raise`` and bare re-raise behaviour."""

    def test_invalid_reraise(self):
        # A bare raise with no active exception is itself a RuntimeError.
        try:
            raise
        except RuntimeError as e:
            self.assertIn("No active exception", str(e))
        else:
            self.fail("No exception raised")

    def test_reraise(self):
        # A bare raise re-raises the very same exception instance.
        try:
            try:
                raise IndexError()
            except IndexError as e:
                exc1 = e
                raise
        except IndexError as exc2:
            self.assertIs(exc1, exc2)
        else:
            self.fail("No exception raised")

    def test_except_reraise(self):
        # After an inner handler completes, a bare raise still targets the
        # outer (TypeError) exception, not the handled KeyError.
        def reraise():
            try:
                raise TypeError("foo")
            except TypeError:
                try:
                    raise KeyError("caught")
                except KeyError:
                    pass
                raise
        self.assertRaises(TypeError, reraise)

    def test_finally_reraise(self):
        # Inside finally the active exception is the KeyError in flight.
        def reraise():
            try:
                raise TypeError("foo")
            except TypeError:
                try:
                    raise KeyError("caught")
                finally:
                    raise
        self.assertRaises(KeyError, reraise)

    def test_nested_reraise(self):
        # A bare raise works from a nested call made inside a handler.
        def nested_reraise():
            raise
        def reraise():
            try:
                raise TypeError("foo")
            except TypeError:
                nested_reraise()
        self.assertRaises(TypeError, reraise)

    def test_raise_from_None(self):
        # 'from None' keeps __context__ but clears __cause__.
        try:
            try:
                raise TypeError("foo")
            except TypeError:
                raise ValueError() from None
        except ValueError as e:
            self.assertIsInstance(e.__context__, TypeError)
            self.assertIsNone(e.__cause__)

    def test_with_reraise1(self):
        # A with block in the handler does not disturb the active exception.
        def reraise():
            try:
                raise TypeError("foo")
            except TypeError:
                with Context():
                    pass
                raise
        self.assertRaises(TypeError, reraise)

    def test_with_reraise2(self):
        # Even when the with block raises (and Context swallows it).
        def reraise():
            try:
                raise TypeError("foo")
            except TypeError:
                with Context():
                    raise KeyError("caught")
                raise
        self.assertRaises(TypeError, reraise)

    def test_yield_reraise(self):
        # A bare raise after a yield still re-raises the handled exception.
        def reraise():
            try:
                raise TypeError("foo")
            except TypeError:
                yield 1
                raise
        g = reraise()
        next(g)
        self.assertRaises(TypeError, lambda: next(g))
        self.assertRaises(StopIteration, lambda: next(g))

    def test_erroneous_exception(self):
        # If instantiating the exception class itself raises, that error
        # propagates instead.
        class MyException(Exception):
            def __init__(self):
                raise RuntimeError()

        try:
            raise MyException
        except RuntimeError:
            pass
        else:
            self.fail("No exception raised")

    def test_new_returns_invalid_instance(self):
        # See issue #11627.
        class MyException(Exception):
            def __new__(cls, *args):
                return object()

        with self.assertRaises(TypeError):
            raise MyException

    def test_assert_with_tuple_arg(self):
        # A tuple message must survive intact as the AssertionError text.
        try:
            assert False, (3,)
        except AssertionError as e:
            self.assertEqual(str(e), "(3,)")
class TestCause(unittest.TestCase):
    """Tests for ``raise ... from ...`` (__cause__ / __suppress_context__)."""

    def testCauseSyntax(self):
        # 'from None' sets __suppress_context__ but preserves __context__;
        # flipping the flag back re-exposes the chained TypeError.
        try:
            try:
                try:
                    raise TypeError
                except Exception:
                    raise ValueError from None
            except ValueError as exc:
                self.assertIsNone(exc.__cause__)
                self.assertTrue(exc.__suppress_context__)
                exc.__suppress_context__ = False
                raise exc
        except ValueError as exc:
            e = exc

        self.assertIsNone(e.__cause__)
        self.assertFalse(e.__suppress_context__)
        self.assertIsInstance(e.__context__, TypeError)

    def test_invalid_cause(self):
        # A non-exception cause is rejected with a TypeError.
        try:
            raise IndexError from 5
        except TypeError as e:
            self.assertIn("exception cause", str(e))
        else:
            self.fail("No exception raised")

    def test_class_cause(self):
        # An exception class as cause is instantiated automatically.
        try:
            raise IndexError from KeyError
        except IndexError as e:
            self.assertIsInstance(e.__cause__, KeyError)
        else:
            self.fail("No exception raised")

    def test_class_cause_nonexception_result(self):
        # See https://github.com/python/cpython/issues/140530.
        class ConstructMortal(BaseException):
            def __new__(*args, **kwargs):
                return ["mortal value"]

        msg = ".*should have returned an instance of BaseException.*"
        with self.assertRaisesRegex(TypeError, msg):
            raise IndexError from ConstructMortal

    def test_instance_cause(self):
        # An exception instance as cause is used as-is.
        cause = KeyError()
        try:
            raise IndexError from cause
        except IndexError as e:
            self.assertIs(e.__cause__, cause)
        else:
            self.fail("No exception raised")

    def test_erroneous_cause(self):
        # If instantiating the cause class raises, that error propagates.
        class MyException(Exception):
            def __init__(self):
                raise RuntimeError()

        try:
            raise IndexError from MyException
        except RuntimeError:
            pass
        else:
            self.fail("No exception raised")
class TestTraceback(unittest.TestCase):
    """Tests for __traceback__ on raised exceptions."""

    def test_sets_traceback(self):
        # Raising attaches a traceback object to the exception.
        try:
            raise IndexError()
        except IndexError as e:
            self.assertIsInstance(e.__traceback__, types.TracebackType)
        else:
            self.fail("No exception raised")

    def test_accepts_traceback(self):
        # with_traceback() keeps the supplied tb; the raise prepends a new
        # frame in front of it.
        tb = get_tb()
        try:
            raise IndexError().with_traceback(tb)
        except IndexError as e:
            self.assertNotEqual(e.__traceback__, tb)
            self.assertEqual(e.__traceback__.tb_next, tb)
        else:
            self.fail("No exception raised")
class TestTracebackType(unittest.TestCase):
    """Tests for traceback object attributes and the explicit constructor."""

    def raiser(self):
        # Helper: gives the caught traceback a second (nested) frame.
        raise ValueError

    def test_attrs(self):
        try:
            self.raiser()
        except Exception as exc:
            tb = exc.__traceback__

        self.assertIsInstance(tb.tb_next, types.TracebackType)
        self.assertIs(tb.tb_frame, sys._getframe())
        self.assertIsInstance(tb.tb_lasti, int)
        self.assertIsInstance(tb.tb_lineno, int)
        self.assertIs(tb.tb_next.tb_next, None)

        # Invalid assignments
        with self.assertRaises(TypeError):
            del tb.tb_next

        with self.assertRaises(TypeError):
            tb.tb_next = "asdf"

        # Loops
        with self.assertRaises(ValueError):
            tb.tb_next = tb

        with self.assertRaises(ValueError):
            tb.tb_next.tb_next = tb

        # Valid assignments
        tb.tb_next = None
        self.assertIs(tb.tb_next, None)

        new_tb = get_tb()
        tb.tb_next = new_tb
        self.assertIs(tb.tb_next, new_tb)

    def test_constructor(self):
        other_tb = get_tb()
        frame = sys._getframe()

        # tb_lasti and tb_lineno are accepted verbatim; only types matter.
        tb = types.TracebackType(other_tb, frame, 1, 2)
        self.assertEqual(tb.tb_next, other_tb)
        self.assertEqual(tb.tb_frame, frame)
        self.assertEqual(tb.tb_lasti, 1)
        self.assertEqual(tb.tb_lineno, 2)

        tb = types.TracebackType(None, frame, 1, 2)
        self.assertEqual(tb.tb_next, None)

        # Each positional argument is type-checked.
        with self.assertRaises(TypeError):
            types.TracebackType("no", frame, 1, 2)

        with self.assertRaises(TypeError):
            types.TracebackType(other_tb, "no", 1, 2)

        with self.assertRaises(TypeError):
            types.TracebackType(other_tb, frame, "no", 2)

        with self.assertRaises(TypeError):
            types.TracebackType(other_tb, frame, 1, "nuh-uh")
class TestContext(unittest.TestCase):
    """Tests for implicit exception chaining via __context__."""

    def test_instance_context_instance_raise(self):
        # The handled instance becomes the new exception's __context__.
        context = IndexError()
        try:
            try:
                raise context
            except IndexError:
                raise OSError()
        except OSError as e:
            self.assertIs(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_class_context_instance_raise(self):
        # Raising a class produces a fresh instance as the context.
        context = IndexError
        try:
            try:
                raise context
            except IndexError:
                raise OSError()
        except OSError as e:
            self.assertIsNot(e.__context__, context)
            self.assertIsInstance(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_class_context_class_raise(self):
        # Same, with the second raise also using a bare class.
        context = IndexError
        try:
            try:
                raise context
            except IndexError:
                raise OSError
        except OSError as e:
            self.assertIsNot(e.__context__, context)
            self.assertIsInstance(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_c_exception_context(self):
        # Exceptions raised from C code (1/0) chain like Python ones.
        try:
            try:
                1/0
            except ZeroDivisionError:
                raise OSError
        except OSError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_c_exception_raise(self):
        # ...also when the second exception is raised implicitly (NameError).
        try:
            try:
                1/0
            except ZeroDivisionError:
                xyzzy
        except NameError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_noraise_finally(self):
        # No exception in flight: finally's raise has no context.
        try:
            try:
                pass
            finally:
                raise OSError
        except OSError as e:
            self.assertIsNone(e.__context__)
        else:
            self.fail("No exception raised")

    def test_raise_finally(self):
        # An exception in flight when finally raises becomes the context.
        try:
            try:
                1/0
            finally:
                raise OSError
        except OSError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_context_manager(self):
        # An exception raised inside __exit__ chains to the body's exception.
        class ContextManager:
            def __enter__(self):
                pass
            def __exit__(self, t, v, tb):
                xyzzy
        try:
            with ContextManager():
                1/0
        except NameError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_cycle_broken(self):
        # Self-cycles (when re-raising a caught exception) are broken
        try:
            try:
                1/0
            except ZeroDivisionError as e:
                raise e
        except ZeroDivisionError as e:
            self.assertIsNone(e.__context__)

    def test_reraise_cycle_broken(self):
        # Non-trivial context cycles (through re-raising a previous exception)
        # are broken too.
        try:
            try:
                xyzzy
            except NameError as a:
                try:
                    1/0
                except ZeroDivisionError:
                    raise a
        except NameError as e:
            self.assertIsNone(e.__context__.__context__)

    def test_not_last(self):
        # Context is not necessarily the last exception
        context = Exception("context")
        try:
            raise context
        except Exception:
            try:
                raise Exception("caught")
            except Exception:
                pass
            try:
                raise Exception("new")
            except Exception as exc:
                raised = exc
        self.assertIs(raised.__context__, context)

    def test_3118(self):
        # deleting the generator caused the __context__ to be cleared
        def gen():
            try:
                yield 1
            finally:
                pass
        def f():
            g = gen()
            next(g)
            try:
                try:
                    raise ValueError
                except ValueError:
                    del g
                    raise KeyError
            except Exception as e:
                self.assertIsInstance(e.__context__, ValueError)
        f()

    def test_3611(self):
        import gc
        # A re-raised exception in a __del__ caused the __context__
        # to be cleared
        class C:
            def __del__(self):
                try:
                    1/0
                except ZeroDivisionError:
                    raise
        def f():
            x = C()
            try:
                try:
                    f.x
                except AttributeError:
                    # make x.__del__ trigger
                    del x
                    gc.collect()  # For PyPy or other GCs.
                    raise TypeError
            except Exception as e:
                self.assertNotEqual(e.__context__, None)
                self.assertIsInstance(e.__context__, AttributeError)

        # The ZeroDivisionError from __del__ is unraisable; capture it so
        # it does not pollute stderr, and verify its type.
        with support.catch_unraisable_exception() as cm:
            f()
            self.assertEqual(ZeroDivisionError, cm.unraisable.exc_type)
class TestRemovedFunctionality(unittest.TestCase):
    """Python-2 raise forms must now fail with TypeError."""

    def test_tuples(self):
        try:
            raise (IndexError, KeyError)  # This should be a tuple!
        except TypeError:
            pass
        else:
            self.fail("No exception raised")

    def test_strings(self):
        # String exceptions were removed; raising a str is a TypeError.
        try:
            raise "foo"
        except TypeError:
            pass
        else:
            self.fail("No exception raised")
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/test_raise.py
|
#! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_database."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
try:
import unittest2 as unittest
except ImportError:
import unittest
from google.protobuf import descriptor_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor_database
class DescriptorDatabaseTest(unittest.TestCase):
    """Tests for descriptor_database.DescriptorDatabase."""

    def testAdd(self):
        # After Add(), the file proto is retrievable both by file name and
        # by every fully-qualified symbol it contains (messages, enums,
        # nested types).
        db = descriptor_database.DescriptorDatabase()
        file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
            factory_test2_pb2.DESCRIPTOR.serialized_pb)
        db.Add(file_desc_proto)

        self.assertEqual(file_desc_proto, db.FindFileByName(
            'google/protobuf/internal/factory_test2.proto'))
        self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
            'google.protobuf.python.internal.Factory2Message'))
        self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
            'google.protobuf.python.internal.Factory2Message.NestedFactory2Message'))
        self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
            'google.protobuf.python.internal.Factory2Enum'))
        self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
            'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum'))
        self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
            'google.protobuf.python.internal.MessageWithNestedEnumOnly.NestedEnum'))
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy as np
from PyQt4 import QtCore, QtGui
import h5py
from utils import *
class DataStream():
    """Read-only view of one dataset inside an HDF5 file.

    NOTE(review): Python 2 code (iteritems/iterkeys/itervalues, PyQt4);
    confirm the target interpreter before modernizing.

    Caches the dataset-level attributes plus, for each independent ('arg')
    and dependent ('val') variable: its attributes, HDF5 path and shape,
    and for vals the list of arg names acting as dimension scales.
    """

    def __init__(self, path, dataPath):
        """Initializes a HDF5 DataStream. PATH is the path to the
        .hdf5 file, and DATAPATH is the path within the file to the
        specific dataset.
        """
        self.filePath = path
        self.dataPath = dataPath
        # Dictionary of the overall dataset attributes.
        self.attrs = {}
        # Dictionary of arg/val names to a dict of arg/val attributes,
        # the arg/val dataset path, and the dataset shape.
        # if a val also has dimScales which is a list of args names.
        self.args = {}
        self.vals = {}
        self.get_attrs()
        # Requires the dataset to carry a 'Name' attribute.
        self.name = self.attrs['Name']

    def get_attrs(self):
        """Load all of the attributes of the dataset into SELF.ATTRS
        and all of the arg/val attributes into SELF.ARGS/SELF.VALS
        as well as the shape of the arg/val, the path to the arg/val
        and for vals the associated dimension scales."""
        f = h5py.File(self.filePath, 'r')
        for name, value in f[self.dataPath].attrs.iteritems():
            self.attrs[str(name)] = value
        for dsetName, dset in f[self.dataPath].iteritems():
            givenName = dset.attrs['Name']
            # Child datasets are named 'Independent*' / 'Dependent*';
            # prefer the human-readable 'Name' attribute as the key when
            # it has not been used yet.
            if dsetName[0:11] == 'Independent':
                if givenName not in self.args.iterkeys():
                    dsetName = givenName
                self.args[str(dsetName)] = {}
                attrDict = self.args[dsetName]
            elif dsetName[0:9] == 'Dependent':
                if givenName not in self.vals.iterkeys():
                    dsetName = givenName
                self.vals[str(dsetName)] = {}
                attrDict = self.vals[dsetName]
            else:
                continue
            # Adding the dataset attributes
            for name, value in dset.attrs.iteritems():
                attrDict[str(name)] = value
            # Adding the datset path and shape.
            attrDict['path'] = dset.name
            attrDict['shape'] = dset.shape
        # Taking care of dimension scales if the dataset
        # is an independent variable. Only use the first dim scale.
        for valDict in self.vals.itervalues():
            dset = f[valDict['path']]
            dimScales = []
            for i in range(len(dset.dims)):
                dimPath = dset.dims[i][0].name
                dimScales += [self._get_arg_name_from_path(dimPath)]
            valDict['dimScales'] = dimScales
        f.close()

    def get_vals(self, minDim):
        """Return a list of val names of vals of dimension greater than
        or equal to MINDIM."""
        valNames = []
        for valName, valDict in self.vals.iteritems():
            if len(valDict['shape']) >= minDim:
                valNames += [valName]
        return valNames

    def _get_arg_name_from_path(self, path):
        """Given an argument path, return the name used as the
        given argumnet's key in SELF.ARGS"""
        # NOTE(review): implicitly returns None when no arg matches PATH.
        for k, v in self.args.iteritems():
            if v['path'] == path:
                return k

    def load_arg(self, argName, s=None):
        """Return the argument ARGNAME[s]"""
        # Opens the file per call; S is an optional index/slice.
        f = h5py.File(self.filePath, 'r')
        path = self.args[argName]['path']
        if s is None:
            data = f[path][:]
        else:
            data = f[path][s]
        f.close()
        return data

    def load_val(self, valName, s=None):
        """Return the value VALNAME[s] (the whole dataset when S is None)."""
        f = h5py.File(self.filePath, 'r')
        path = self.vals[valName]['path']
        if s is None:
            data = f[path][..., :]
        else:
            data = f[path][s]
        f.close()
        return data

    def gen_slice(self, valName, sliceDict):
        """Generate a slice into val VALNAME using the dictionary SLICEDICT
        which is a dictionary from the names of the axis to the desired axis slice."""
        argNames = self.get_args_to_val(valName)
        s = []
        for argName in argNames:
            argSlice = sliceDict[argName]
            # A tuple is expanded into slice(start, stop[, step]).
            if isinstance(argSlice, tuple):
                argSlice = slice(*argSlice)
            s += [argSlice]
        return tuple(s)

    def get_args_to_val(self, valName):
        """Return the name of all the arguments to the value VALNAME."""
        return self.vals[valName]['dimScales']

    def get_val_shape(self, valName):
        """Return the shape tuple of val VALNAME."""
        return self.vals[valName]['shape']

    def get_arg_shape(self, argName):
        """Return the shape of arg ARGNAME. Return the first
        item of shape, since args are 1D."""
        return self.args[argName]['shape'][0]

    def addToTab(self, tab):
        """Add SELF to QTreeWidget TAB.TREE_DATA"""
        tree = tab.tree_data
        nameDict = tab.dataStreams
        # Disambiguate the display name if another stream already uses it.
        if self.name in nameDict.keys():
            if nameDict[self.name] != self:
                baseName = self.name + '({})'
                self.name = uniqueName(baseName, 1, nameDict.keys())
        dsTW = QtGui.QTreeWidgetItem([self.name])
        for valName, valDict in self.vals.iteritems():
            valTW = QtGui.QTreeWidgetItem()
            valTW.setText(0, valName)
            valTW.setText(1, str(valDict['shape']))
            dsTW.addChild(valTW)
            # setting fields for action upon right click
            valTW.isClickable = True
            valTW.dataType = 'val'
            valTW.ds = self
            for argName in self.get_args_to_val(valName):
                argDict = self.args[argName]
                argTW = QtGui.QTreeWidgetItem()
                argTW.setText(0, argName)
                argTW.setText(1, str(argDict['shape']))
                valTW.addChild(argTW)
                # setting fields for action upon right click
                argTW.isClickable = True
                argTW.dataType = 'arg'
                argTW.valName = valName
                argTW.ds = self
        tree.addTopLevelItem(dsTW)
|
unknown
|
codeparrot/codeparrot-clean
| ||
DOCUMENTATION:
name: unique
author: Brian Coca (@bcoca)
version_added: "1.4"
short_description: set of unique items of a list
description:
- Creates a list of unique elements (a set) from the provided input list.
options:
_input:
description: A list.
type: list
required: true
case_sensitive:
description: Whether to consider case when comparing elements.
default: false
type: bool
attribute:
description: Filter objects with unique values for this attribute.
type: str
seealso:
- plugin_type: filter
plugin: ansible.builtin.difference
- plugin_type: filter
plugin: ansible.builtin.intersect
- plugin_type: filter
plugin: ansible.builtin.symmetric_difference
- plugin_type: filter
plugin: ansible.builtin.union
EXAMPLES: |
# return only the unique elements of list1
# list1: [1, 2, 5, 1, 3, 4, 10]
{{ list1 | unique }}
# => [1, 2, 5, 3, 4, 10]
# return case sensitive unique elements
{{ ['a', 'A', 'a'] | unique(case_sensitive='true') }}
# => ['a', 'A']
# return case insensitive unique elements
{{ ['b', 'B', 'b'] | unique() }}
# => ['b']
# return unique elements of list based on attribute
# => [{"age": 12, "name": "a" }, { "age": 14, "name": "b"}]
- debug:
msg: "{{ sample | unique(attribute='age') }}"
vars:
sample:
- name: a
age: 12
- name: b
age: 14
- name: c
age: 14
RETURN:
_value:
description: A list with unique elements, also known as a set.
type: list
|
unknown
|
github
|
https://github.com/ansible/ansible
|
lib/ansible/plugins/filter/unique.yml
|
// Test fixture: records the interleaving of microtasks around top-level
// awaits.  The exact report() ordering is what the test asserts, so the
// statements here must not be reordered.
import { report } from "../tick";
import "./d";
report("async2 before");
// `await 0` yields to the microtask queue without waiting on a real promise.
await 0;
report("async2 middle");
await 0;
report("async2 after");
|
javascript
|
github
|
https://github.com/webpack/webpack
|
test/cases/async-modules/micro-ticks-parents/case-a/async2.js
|
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask_restful import Resource
class ModuleResourcesManager(object):
    """Keeps track of the REST resources exposed by a navitia module."""

    def __init__(self, module):
        self.resources = []
        self.module = module

    def register_resource(self, resource):
        """Remember *resource* and bind it to the managed module.

        :param resource: the resource to register
        :type resource: ModuleResource
        :raise TypeError: when *resource* is not a ModuleResource instance
        """
        if not isinstance(resource, ModuleResource):
            raise TypeError('Expected type ModuleResource, got %s instead' % resource.__class__.__name__)
        resource.associate_to(self.module)
        self.resources.append(resource)
class ModuleResource(Resource):
    """Base class for REST resources owned by a navitia module.

    Subclasses must override get().  associate_to() is called by
    ModuleResourcesManager to record the owning module's name.
    """

    def __init__(self):
        Resource.__init__(self)
        self.method_decorators = []
        self.module_name = ''

    def associate_to(self, module):
        # Record which module this resource belongs to.
        self.module_name = module.name

    def get(self):
        # Grammar fix in the error message ("override" -> "overridden").
        raise NotImplementedError('get() MUST be overridden')
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*-------------------------------------------------------------------------
*
* bufmask.c
* Routines for buffer masking. Used to mask certain bits
* in a page which can be different when the WAL is generated
* and when the WAL is applied.
*
* Portions Copyright (c) 2016-2026, PostgreSQL Global Development Group
*
* Contains common routines required for masking a page.
*
* IDENTIFICATION
* src/backend/access/common/bufmask.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/bufmask.h"
/*
* mask_page_lsn_and_checksum
*
* In consistency checks, the LSN of the two pages compared will likely be
* different because of concurrent operations when the WAL is generated and
* the state of the page when WAL is applied. Also, mask out checksum as
* masking anything else on page means checksum is not going to match as well.
*/
void
mask_page_lsn_and_checksum(Page page)
{
	PageHeader	hdr = (PageHeader) page;

	/* Both fields legitimately differ between primary and replayed pages. */
	PageXLogRecPtrSet(hdr->pd_lsn, (uint64) MASK_MARKER);
	hdr->pd_checksum = MASK_MARKER;
}
/*
* mask_page_hint_bits
*
* Mask hint bits in PageHeader. We want to ignore differences in hint bits,
* since they can be set without emitting any WAL.
*/
void
mask_page_hint_bits(Page page)
{
	PageHeader	hdr = (PageHeader) page;

	/* prune_xid behaves like a hint bit, so mask it out. */
	hdr->pd_prune_xid = MASK_MARKER;

	/* PD_PAGE_FULL and PD_HAS_FREE_LINES are advisory only. */
	PageClearFull(page);
	PageClearHasFreeLinePointers(page);

	/*
	 * During replay, if the page LSN has advanced past our XLOG record's LSN,
	 * we don't mark the page all-visible. See heap_xlog_visible() for
	 * details.
	 */
	PageClearAllVisible(page);
}
/*
* mask_unused_space
*
* Mask the unused space of a page between pd_lower and pd_upper.
*/
void
mask_unused_space(Page page)
{
	PageHeader	hdr = (PageHeader) page;
	int			lower = hdr->pd_lower;
	int			upper = hdr->pd_upper;
	int			special = hdr->pd_special;

	/* Refuse to touch memory when the page pointers look corrupt. */
	if (lower > upper || special < upper ||
		lower < SizeOfPageHeaderData || special > BLCKSZ)
		elog(ERROR, "invalid page pd_lower %u pd_upper %u pd_special %u",
			 lower, upper, special);

	/* Wipe the hole between the line-pointer array and the tuple data. */
	memset(page + lower, MASK_MARKER, upper - lower);
}
/*
* mask_lp_flags
*
* In some index AMs, line pointer flags can be modified on the primary
* without emitting any WAL record.
*/
void
mask_lp_flags(Page page)
{
	OffsetNumber off;
	OffsetNumber max = PageGetMaxOffsetNumber(page);

	/* Normalize the flags of every in-use line pointer. */
	for (off = FirstOffsetNumber; off <= max; off = OffsetNumberNext(off))
	{
		ItemId		lp = PageGetItemId(page, off);

		if (ItemIdIsUsed(lp))
			lp->lp_flags = LP_UNUSED;
	}
}
/*
* mask_page_content
*
* In some index AMs, the contents of deleted pages need to be almost
* completely ignored.
*/
void
mask_page_content(Page page)
{
	PageHeader	hdr = (PageHeader) page;

	/* Wipe everything following the page header. */
	memset(page + SizeOfPageHeaderData, MASK_MARKER,
		   BLCKSZ - SizeOfPageHeaderData);

	/* pd_lower and pd_upper carry no meaning on such pages either. */
	memset(&hdr->pd_lower, MASK_MARKER, sizeof(uint16));
	memset(&hdr->pd_upper, MASK_MARKER, sizeof(uint16));
}
|
c
|
github
|
https://github.com/postgres/postgres
|
src/backend/access/common/bufmask.c
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for callable().
This converts callable(obj) into isinstance(obj, collections.Callable), adding a
collections import if needed."""
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call, Name, String, Attr, touch_import
class FixCallable(fixer_base.BaseFix):
    """Rewrite callable(x) as isinstance(x, collections.Callable)."""
    BM_compatible = True
    order = "pre"

    # Ignore callable(*args) or use of keywords.
    # Either could be a hint that the builtin callable() is not being used.
    PATTERN = """
    power< 'callable'
           trailer< lpar='('
                    ( not(arglist | argument<any '=' any>) func=any
                      | func=arglist<(not argument<any '=' any>) any ','> )
                    rpar=')' >
           after=any*
    >
    """

    def transform(self, node, results):
        # The single positional argument captured by the pattern.
        target = results['func']
        # Make sure `collections` is importable at the call site.
        touch_import(None, u'collections', node=node)
        call_args = [target.clone(), String(u', ')]
        call_args.extend(Attr(Name(u'collections'), Name(u'Callable')))
        return Call(Name(u'isinstance'), call_args, prefix=node.prefix)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-11 13:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # NOTE: auto-generated by Django 1.9.5 (makemigrations).  Operation
    # order matters: models are created first, then FK fields that refer
    # to later-created models are added via AddField.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Route',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='RoutePath',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField()),
                ('route', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Route')),
            ],
        ),
        migrations.CreateModel(
            name='Station',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('stop_number', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=20)),
                ('last_name', models.CharField(max_length=20)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
            ],
        ),
        migrations.CreateModel(
            name='Train',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.IntegerField(unique=True)),
                ('name', models.CharField(max_length=300)),
                ('date', models.DateTimeField()),
                ('departure', models.TimeField(blank=True, null=True)),
                ('arrival', models.TimeField(blank=True, null=True)),
                ('route', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Route')),
            ],
        ),
        # FK fields referring to Station are added after Station exists.
        migrations.AddField(
            model_name='routepath',
            name='station',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.Station'),
        ),
        migrations.AddField(
            model_name='route',
            name='destination',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination', to='booking.Station'),
        ),
        migrations.AddField(
            model_name='route',
            name='route_path',
            field=models.ManyToManyField(through='booking.RoutePath', to='booking.Station'),
        ),
        migrations.AddField(
            model_name='route',
            name='source',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source', to='booking.Station'),
        ),
    ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- This module can be used to search Amazon EFS file systems.
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
aliases: [ creation_token ]
id:
description:
- ID of Amazon EFS.
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary.
targets:
description:
- List of targets on which to filter the returned results.
- Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Find all existing efs
efs_facts:
register: result
- name: Find efs using id
efs_facts:
id: fs-1234abcd
- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: str
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: str
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned: always
type: str
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned: always
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system with leading dot from the time AWS EFS required to add network suffix to EFS address
returned: always
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
filesystem_address:
description: url of file system
returned: always
type: str
sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: str
sample: "generalPurpose"
throughput_mode:
description: mode of throughput for the file system
returned: when botocore >= 1.10.57
type: str
sample: "bursting"
provisioned_throughput_in_mibps:
description: throughput provisioned in Mibps
returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned"
type: float
sample: 15.0
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from collections import defaultdict
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
from ansible.module_utils._text import to_native
class EFSConnection(object):
    """Thin wrapper around the boto3 EFS client with throttling retries."""
    # Lifecycle states reported by the EFS API.
    STATE_CREATING = 'creating'
    STATE_AVAILABLE = 'available'
    STATE_DELETING = 'deleting'
    STATE_DELETED = 'deleted'
    def __init__(self, module, region, **aws_connect_params):
        # Build the boto3 EFS client; any failure aborts the module run.
        try:
            self.connection = boto3_conn(module, conn_type='client',
                                         resource='efs', region=region,
                                         **aws_connect_params)
            self.module = module
        except Exception as e:
            module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
        self.region = region
    @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
    def list_file_systems(self, **kwargs):
        """
        Returns generator of file systems including all attributes of FS
        """
        paginator = self.connection.get_paginator('describe_file_systems')
        return paginator.paginate(**kwargs).build_full_result()['FileSystems']
    @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
    def get_tags(self, file_system_id):
        """
        Returns tag list for selected instance of EFS
        """
        paginator = self.connection.get_paginator('describe_tags')
        return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])
    @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
    def get_mount_targets(self, file_system_id):
        """
        Returns mount targets for selected instance of EFS
        """
        paginator = self.connection.get_paginator('describe_mount_targets')
        return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']
    @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
    def get_security_groups(self, mount_target_id):
        """
        Returns security groups for selected instance of EFS
        """
        return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']
    def get_mount_targets_data(self, file_systems):
        # Enrich each available file system dict with its mount targets
        # (snake_cased).  Non-available file systems are left untouched.
        for item in file_systems:
            if item['life_cycle_state'] == self.STATE_AVAILABLE:
                try:
                    mount_targets = self.get_mount_targets(item['file_system_id'])
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
                for mt in mount_targets:
                    item['mount_targets'].append(camel_dict_to_snake_dict(mt))
        return file_systems
    def get_security_groups_data(self, file_systems):
        # Attach security groups to each available mount target.
        for item in file_systems:
            if item['life_cycle_state'] == self.STATE_AVAILABLE:
                for target in item['mount_targets']:
                    if target['life_cycle_state'] == self.STATE_AVAILABLE:
                        try:
                            target['security_groups'] = self.get_security_groups(target['mount_target_id'])
                        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                            self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
                    else:
                        target['security_groups'] = []
            else:
                # NOTE(review): this clears tags/mount_targets for any
                # non-available file system — presumably intentional, but
                # it discards data fetched earlier; confirm against callers.
                item['tags'] = {}
                item['mount_targets'] = []
        return file_systems
    def get_file_systems(self, file_system_id=None, creation_token=None):
        # Fetch file systems (optionally filtered by id/creation token) and
        # normalize each record to snake_case with stringified timestamps.
        kwargs = dict()
        if file_system_id:
            kwargs['FileSystemId'] = file_system_id
        if creation_token:
            kwargs['CreationToken'] = creation_token
        try:
            file_systems = self.list_file_systems(**kwargs)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't get EFS file systems")
        results = list()
        for item in file_systems:
            item['CreationTime'] = str(item['CreationTime'])
            """
            In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it
            AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose
            And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount)
            AWS documentation is available here:
            U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
            """
            item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
            item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
            if 'Timestamp' in item['SizeInBytes']:
                item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
            result = camel_dict_to_snake_dict(item)
            result['tags'] = {}
            result['mount_targets'] = []
            # Set tags *after* doing camel to snake
            if result['life_cycle_state'] == self.STATE_AVAILABLE:
                try:
                    result['tags'] = self.get_tags(result['file_system_id'])
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
            results.append(result)
        return results
def prefix_to_attr(attr_id):
    """
    Helper method to convert ID prefix to mount target attribute.

    Scans the known prefixes directly instead of materializing a full
    list and delegating to first_or_default; falls back to 'ip_address'
    when no prefix matches.
    """
    attr_by_prefix = {
        'fsmt-': 'mount_target_id',
        'subnet-': 'subnet_id',
        'eni-': 'network_interface_id',
        'sg-': 'security_groups'
    }
    return next((attr_name for prefix, attr_name in attr_by_prefix.items()
                 if str(attr_id).startswith(prefix)), 'ip_address')
def first_or_default(items, default=None):
    """Return the first element of *items*, or *default* when it is empty."""
    return next(iter(items), default)
def has_tags(available, required):
    """Return True when every required tag exists in *available* with
    the same value."""
    return all(key in available and available[key] == value
               for key, value in required.items())
def has_targets(available, required):
    """Return True when every required (value, field) pair is found among
    the available mount targets."""
    grouped = group_list_of_dict(available)
    return all(field in grouped and value in grouped[field]
               for value, field in required)
def group_list_of_dict(array):
    """Merge a list of dicts into one dict mapping each key to the list of
    every value seen for that key (list values are flattened in)."""
    merged = defaultdict(list)
    for record in array:
        for key, value in record.items():
            if isinstance(value, list):
                merged[key].extend(value)
            else:
                merged[key].append(value)
    return merged
def main():
    """
    Module action handler: gather EFS facts, filter by tags/targets,
    and return them via exit_json.
    """
    spec = ec2_argument_spec()
    spec.update(dict(
        id=dict(),
        name=dict(aliases=['creation_token']),
        tags=dict(type="dict", default={}),
        targets=dict(type="list", default=[])
    ))
    module = AnsibleAWSModule(argument_spec=spec,
                              supports_check_mode=True)
    region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = EFSConnection(module, region, **aws_connect_params)

    fs_name = module.params.get('name')
    fs_id = module.params.get('id')
    wanted_tags = module.params.get('tags')
    wanted_targets = module.params.get('targets')

    efs_list = connection.get_file_systems(fs_id, fs_name)
    # Tag filtering happens before the (expensive) per-FS target lookups.
    if wanted_tags:
        efs_list = [fs for fs in efs_list if has_tags(fs['tags'], wanted_tags)]
    efs_list = connection.get_mount_targets_data(efs_list)
    efs_list = connection.get_security_groups_data(efs_list)
    if wanted_targets:
        pairs = [(t, prefix_to_attr(t)) for t in wanted_targets]
        efs_list = [fs for fs in efs_list if has_targets(fs['mount_targets'], pairs)]
    module.exit_json(changed=False, ansible_facts={'efs': efs_list})
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
import os
import vtk
from vtk.test import Testing
class SimpleGlyph:
    """A simple class used to test vtkTensorGlyph.

    Builds a sphere-source tensor-glyph pipeline plus an outline of the
    input dataset.  Pipeline connections are order-dependent; do not
    reorder the statements below.
    """
    def __init__(self, reader):
        self.reader = reader
        # Glyph source: an off-center half-radius sphere.
        sg = self.src_glyph = vtk.vtkSphereSource()
        sg.SetRadius(0.5)
        sg.SetCenter(0.5, 0.0, 0.0)
        # Tensor glyph filter fed by the reader and the sphere source.
        g = self.glyph = vtk.vtkTensorGlyph()
        g.SetInputConnection(self.reader.GetOutputPort())
        g.SetSourceConnection(self.src_glyph.GetOutputPort())
        g.SetScaleFactor(0.25)
        # The normals are needed to generate the right colors and if
        # not used some of the glyphs are black.
        self.normals = vtk.vtkPolyDataNormals()
        self.normals.SetInputConnection(g.GetOutputPort())
        self.map = vtk.vtkPolyDataMapper()
        self.map.SetInputConnection(self.normals.GetOutputPort())
        self.act = vtk.vtkActor()
        self.act.SetMapper(self.map)
        # An outline.
        self.of = vtk.vtkOutlineFilter()
        self.of.SetInputConnection(self.reader.GetOutputPort())
        self.out_map = vtk.vtkPolyDataMapper()
        self.out_map.SetInputConnection(self.of.GetOutputPort())
        self.out_act = vtk.vtkActor()
        self.out_act.SetMapper(self.out_map)
    def GetActors(self):
        # Both the glyph actor and the outline actor.
        return self.act, self.out_act
    def Update(self):
        # Execute the glyph filter, then map its scalar range for coloring.
        self.glyph.Update()
        s = self.glyph.GetOutput().GetPointData().GetScalars()
        if s:
            self.map.SetScalarRange(s.GetRange())
    def SetPosition(self, pos):
        # Move glyphs and outline together.
        self.act.SetPosition(pos)
        self.out_act.SetPosition(pos)
class TestTensorGlyph(Testing.vtkTest):
    def testGlyphs(self):
        '''Test if the glyphs are created nicely.'''
        reader = vtk.vtkDataSetReader()
        data_file = os.path.join(Testing.VTK_DATA_ROOT, "Data", "tensors.vtk")
        reader.SetFileName(data_file)
        # Four glyph configurations arranged on a 2x2 grid, each exercising
        # different vtkTensorGlyph options.
        g1 = SimpleGlyph(reader)
        g1.glyph.ColorGlyphsOff()
        g1.Update()
        g2 = SimpleGlyph(reader)
        g2.glyph.ExtractEigenvaluesOff()
        g2.Update()
        g2.SetPosition((2.0, 0.0, 0.0))
        g3 = SimpleGlyph(reader)
        g3.glyph.SetColorModeToEigenvalues()
        g3.glyph.ThreeGlyphsOn()
        g3.Update()
        g3.SetPosition((0.0, 2.0, 0.0))
        g4 = SimpleGlyph(reader)
        g4.glyph.SetColorModeToEigenvalues()
        g4.glyph.ThreeGlyphsOn()
        g4.glyph.SymmetricOn()
        g4.Update()
        g4.SetPosition((2.0, 2.0, 0.0))
        # Render all actors and compare against the baseline image.
        ren = vtk.vtkRenderer()
        for i in (g1, g2, g3, g4):
            for j in i.GetActors():
                ren.AddActor(j)
        ren.ResetCamera();
        cam = ren.GetActiveCamera()
        cam.Azimuth(-20)
        cam.Elevation(20)
        cam.Zoom(1.5)
        ren.SetBackground(0.5, 0.5, 0.5)
        renWin = vtk.vtkRenderWindow()
        renWin.AddRenderer(ren)
        renWin.Render()
        img_file = "TestTensorGlyph.png"
        Testing.compareImage(renWin, Testing.getAbsImagePath(img_file))
        Testing.interact()
if __name__ == "__main__":
    # Run the single test case through VTK's test harness.
    Testing.main([(TestTensorGlyph, 'test')])
|
unknown
|
codeparrot/codeparrot-clean
| ||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationCreatedEvent", "Conversation"]
# NOTE: this model is auto-generated by Stainless from the OpenAPI spec;
# change the spec rather than hand-editing.
class Conversation(BaseModel):
    """The conversation resource."""
    id: Optional[str] = None
    """The unique ID of the conversation."""
    object: Optional[Literal["realtime.conversation"]] = None
    """The object type, must be `realtime.conversation`."""
# NOTE: auto-generated by Stainless from the OpenAPI spec; edit the spec,
# not this file.
class ConversationCreatedEvent(BaseModel):
    """Returned when a conversation is created. Emitted right after session creation."""
    conversation: Conversation
    """The conversation resource."""
    event_id: str
    """The unique ID of the server event."""
    type: Literal["conversation.created"]
    """The event type, must be `conversation.created`."""
|
python
|
github
|
https://github.com/openai/openai-python
|
src/openai/types/realtime/conversation_created_event.py
|
benchmark:
- (1..1_000_000).last(100)
- (1..1_000_000).last(1000)
- (1..1_000_000).last(10000)
|
unknown
|
github
|
https://github.com/ruby/ruby
|
benchmark/range_last.yml
|
/*
* Copyright (C) 2005 Junio C Hamano
*/
#include "git-compat-util.h"
#include "gettext.h"
#include "diff.h"
#include "diffcore.h"
#include "wildmatch.h"
static char **order;
static int order_cnt;
/*
 * Read the orderfile once and cache its patterns in order[]/order_cnt.
 * Lines starting with '#' and empty lines are skipped as comments.
 */
static void prepare_order(const char *orderfile)
{
	int cnt, pass;
	struct strbuf sb = STRBUF_INIT;
	const char *cp, *endp;
	ssize_t sz;
	/* Already parsed on a previous call; the table is global. */
	if (order)
		return;
	sz = strbuf_read_file(&sb, orderfile, 0);
	if (sz < 0)
		die_errno(_("failed to read orderfile '%s'"), orderfile);
	endp = sb.buf + sz;
	/* Two passes: pass 0 counts the patterns, pass 1 stores them. */
	for (pass = 0; pass < 2; pass++) {
		cnt = 0;
		cp = sb.buf;
		while (cp < endp) {
			const char *ep;
			for (ep = cp; ep < endp && *ep != '\n'; ep++)
				;
			/* cp to ep has one line */
			if (*cp == '\n' || *cp == '#')
				; /* comment */
			else if (pass == 0)
				cnt++;
			else {
				order[cnt] = xmemdupz(cp, ep - cp);
				cnt++;
			}
			/* Step past the newline (if any) to the next line. */
			if (ep < endp)
				ep++;
			cp = ep;
		}
		if (pass == 0) {
			/* Size the table now that we know the pattern count. */
			order_cnt = cnt;
			ALLOC_ARRAY(order, cnt);
		}
	}
	strbuf_release(&sb);
}
/*
 * Return the index of the first orderfile pattern matching PATH, or
 * order_cnt when none matches (i.e. unmatched paths sort last).
 */
static int match_order(const char *path)
{
	int i;
	/* Static buffer deliberately reused across calls to avoid realloc. */
	static struct strbuf p = STRBUF_INIT;
	for (i = 0; i < order_cnt; i++) {
		strbuf_reset(&p);
		strbuf_addstr(&p, path);
		/* Try the full path, then strip one trailing component at a
		 * time so a pattern can match any leading directory. */
		while (p.buf[0]) {
			char *cp;
			if (!wildmatch(order[i], p.buf, 0))
				return i;
			cp = strrchr(p.buf, '/');
			if (!cp)
				break;
			*cp = 0;
		}
	}
	return order_cnt;
}
/* qsort comparator: orderfile rank first, original position as tiebreak. */
static int compare_objs_order(const void *a_, const void *b_)
{
	const struct obj_order *a = a_;
	const struct obj_order *b = b_;

	if (a->order != b->order)
		return a->order - b->order;
	return a->orig_order - b->orig_order;
}
/*
 * Stable-sort OBJS by orderfile rank; OBJ_PATH maps an entry to the
 * path used for pattern matching.
 */
void order_objects(const char *orderfile, obj_path_fn_t obj_path,
		   struct obj_order *objs, int nr)
{
	int idx;

	if (!nr)
		return;
	prepare_order(orderfile);
	for (idx = 0; idx < nr; idx++) {
		objs[idx].orig_order = idx;
		objs[idx].order = match_order(obj_path(objs[idx].obj));
	}
	QSORT(objs, nr, compare_objs_order);
}
/* Extract the post-image path of a diff_filepair for ordering. */
static const char *pair_pathtwo(void *obj)
{
	return ((struct diff_filepair *)obj)->two->path;
}
/* Reorder the queued diff pairs according to ORDERFILE. */
void diffcore_order(const char *orderfile)
{
	struct diff_queue_struct *q = &diff_queued_diff;
	struct obj_order *ordering;
	int i;

	if (!q->nr)
		return;

	ALLOC_ARRAY(ordering, q->nr);
	for (i = 0; i < q->nr; i++)
		ordering[i].obj = q->queue[i];
	order_objects(orderfile, pair_pathtwo, ordering, q->nr);
	for (i = 0; i < q->nr; i++)
		q->queue[i] = ordering[i].obj;
	free(ordering);
}
|
c
|
github
|
https://github.com/git/git
|
diffcore-order.c
|
import os
import sys
import cocotb
import logging
from cocotb.result import TestFailure
from cocotb.clock import Clock
import time
from array import array as Array
from cocotb.triggers import Timer, FallingEdge
from cocotb.drivers.amba import AXI4LiteMaster
from cocotb.drivers.amba import AXI4StreamMaster
from cocotb.drivers.amba import AXI4StreamSlave
CLK_PERIOD = 10
MODULE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, "rtl")
MODULE_PATH = os.path.abspath(MODULE_PATH)
BIT_CONTROL_ENABLE = 0
BIT_CONTROL_RESET = 1
BIT_STATUS_ACTIVE = 0
REG_CONTROL = 0
REG_STATUS = 1
REG_VERSION = 2
REG_VIDEO_IN_SIZE = 4
REG_VIDEO_IN_WIDTH = 5
REG_VIDEO_IN_HEIGHT = 6
REG_VIDEO_OUT_SIZE = 8
REG_VIDEO_OUT_WIDTH = 9
REG_VIDEO_OUT_HEIGHT = 10
REG_VIDEO_IN_START_X = 12
REG_VIDEO_IN_START_Y = 13
REG_IN_FILL_PIXEL = 14
MEM_ADR_RESET = 0x01
"""
Functions Required for checking out PMOD TFT
1. Write to the controller chip internal register
2. Read from the controller chip internall register
3. Video Frame Successfully is sent from the memory
to the controller chip
4. Video Frames are continually sent out
"""
WIDTH = 8
HEIGHT = 4
H_BLANK = 40
V_BLANK = 200
PIXEL_COUNT = WIDTH * HEIGHT
@cocotb.coroutine
def axis_slave_listener(axis_slave):
    """Wait for one AXI4-Stream read and echo the payload."""
    rx = yield axis_slave.read()
    print ("read data: %s" % str(rx))
def setup_dut(dut):
    """Start the free-running DUT clock in the background."""
    clock = Clock(dut.clk, CLK_PERIOD)
    cocotb.fork(clock.start())
@cocotb.test(skip = False)
def read_all_registers(dut):
    """
    Description:
        Read every configuration/status register from the core over the
        AXI4-Lite bus and verify the post-reset default values
        (1280x720 video in/out, zero start offsets, zero fill pixel).
    Test ID: 0
    Expected Results:
        Each register read returns its expected default value.
    """
    dut.rst <= 1
    dut.test_id <= 0
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
    video_in = AXI4StreamSlave(dut, "AXISS", dut.clk, width=24)
    setup_dut(dut)
    # Pulse reset twice so the core starts from a known state.
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 1
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.log.info("Ready")
    control = 0x02
    # Assert the controller reset bit (BIT_CONTROL_RESET)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    control = 0x01
    # Enable the controller (BIT_CONTROL_ENABLE)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    yield Timer(CLK_PERIOD * 100)
    # Read back all the registers, make sure they make sense.
    # BUG FIX: every failure message below used the invalid format spec
    # "0x%08" (missing the conversion character), which raised ValueError
    # instead of the intended TestFailure; the REG_STATUS raise also passed
    # "% control, data" (args not wrapped in a tuple). Both are fixed.
    data = yield axim.read(REG_STATUS)
    yield Timer(CLK_PERIOD * 10)
    if data != control:
        raise TestFailure("REG_STATUS Register was not correct, should be: 0x%08X but read: 0x%08X" % (control, data))
    data = yield axim.read(REG_VIDEO_IN_SIZE)
    yield Timer(CLK_PERIOD * 10)
    if data != (1280 * 720):
        raise TestFailure("REG_VIDEO_IN_SIZE Register was not correct, should be: 0x%08X but read: 0x%08X" % ((1280 * 720), data))
    data = yield axim.read(REG_VIDEO_IN_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    if data != 1280:
        raise TestFailure("REG_VIDEO_IN_WIDTH Register was not correct, should be: 0x%08X but read: 0x%08X" % (1280, data))
    data = yield axim.read(REG_VIDEO_IN_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    if data != 720:
        raise TestFailure("REG_VIDEO_IN_HEIGHT Register was not correct, should be: 0x%08X but read: 0x%08X" % (720, data))
    data = yield axim.read(REG_VIDEO_OUT_SIZE)
    yield Timer(CLK_PERIOD * 10)
    if data != (1280 * 720):
        raise TestFailure("REG_VIDEO_OUT_SIZE Register was not correct, should be: 0x%08X but read: 0x%08X" % ((1280 * 720), data))
    data = yield axim.read(REG_VIDEO_OUT_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    if data != 1280:
        raise TestFailure("REG_VIDEO_OUT_WIDTH Register was not correct, should be: 0x%08X but read: 0x%08X" % (1280, data))
    data = yield axim.read(REG_VIDEO_OUT_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    if data != 720:
        raise TestFailure("REG_VIDEO_OUT_HEIGHT Register was not correct, should be: 0x%08X but read: 0x%08X" % (720, data))
    data = yield axim.read(REG_VIDEO_IN_START_X)
    yield Timer(CLK_PERIOD * 10)
    if data != 0:
        raise TestFailure("REG_VIDEO_IN_START_X Register was not correct, should be: 0x%08X but read: 0x%08X" % (0, data))
    data = yield axim.read(REG_VIDEO_IN_START_Y)
    yield Timer(CLK_PERIOD * 10)
    if data != 0:
        raise TestFailure("REG_VIDEO_IN_START_Y Register was not correct, should be: 0x%08X but read: 0x%08X" % (0, data))
    data = yield axim.read(REG_IN_FILL_PIXEL)
    yield Timer(CLK_PERIOD * 10)
    if data != 0:
        raise TestFailure("REG_IN_FILL_PIXEL Register was not correct, should be: 0x%08X but read: 0x%08X" % (0, data))
@cocotb.test(skip = False)
# NOTE(review): "downsamled" is a typo for "downsampled"; renaming would
# change the cocotb-reported test name, so it is kept as-is.
def write_downsamled_top_left_frame(dut):
    """
    Description:
        Configure a 3x3 input frame and a 2x2 output frame starting at
        (0, 0), then stream one frame in.
    Test ID: 1
    Expected Results:
        An output frame taken from the top-left of the input is expected
        on the output stream (observed via axis_slave_listener).
    """
    IMAGE_IN_WIDTH = 3
    IMAGE_IN_HEIGHT = 3
    IMAGE_IN_SIZE = IMAGE_IN_WIDTH * IMAGE_IN_HEIGHT
    IMAGE_OUT_WIDTH = 2
    IMAGE_OUT_HEIGHT = 2
    IMAGE_OUT_SIZE = IMAGE_OUT_WIDTH * IMAGE_OUT_HEIGHT
    IMAGE_IN_START_X = 0
    IMAGE_IN_START_Y = 0
    # Fill the test frame with ascending row-major pixel values.
    video = []
    for y in range(IMAGE_IN_HEIGHT):
        for x in range(IMAGE_IN_WIDTH):
            video.append((IMAGE_IN_WIDTH * y) + x)
    dut.rst <= 1
    dut.test_id <= 1
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    # Stream master drives the input frame; stream slave captures the output.
    video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
    video_in = AXI4StreamSlave(dut, "AXISS", dut.clk, width=24)
    setup_dut(dut)
    # Pulse reset twice so the core starts from a known state.
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 1
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.log.info("Ready")
    cocotb.fork(axis_slave_listener(video_in))
    control = 0x02
    # Assert the controller reset bit (BIT_CONTROL_RESET)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Program the input/output frame geometry.
    yield axim.write(REG_VIDEO_IN_WIDTH, IMAGE_IN_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_HEIGHT, IMAGE_IN_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_SIZE, IMAGE_IN_SIZE)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_WIDTH, IMAGE_OUT_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_HEIGHT, IMAGE_OUT_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_SIZE, IMAGE_OUT_SIZE)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_START_X, IMAGE_IN_START_X)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_START_Y, IMAGE_IN_START_Y)
    yield Timer(CLK_PERIOD * 10)
    control = 0x01
    # Enable the controller (BIT_CONTROL_ENABLE)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Stream one frame in and give the core time to emit the output.
    yield video_out.write(video)
    yield Timer(CLK_PERIOD * 500)
@cocotb.test(skip = False)
def write_one_to_one_frame(dut):
    """
    Description:
        Configure identical 3x3 input and output frame geometry
        (no scaling/cropping), then stream one frame in.
    Test ID: 2
    Expected Results:
        The output stream is expected to carry the frame unchanged
        (observed via axis_slave_listener).
    """
    IMAGE_IN_WIDTH = 3
    IMAGE_IN_HEIGHT = 3
    IMAGE_IN_SIZE = IMAGE_IN_WIDTH * IMAGE_IN_HEIGHT
    IMAGE_OUT_WIDTH = 3
    IMAGE_OUT_HEIGHT = 3
    IMAGE_OUT_SIZE = IMAGE_OUT_WIDTH * IMAGE_OUT_HEIGHT
    IMAGE_IN_START_X = 0
    IMAGE_IN_START_Y = 0
    # Fill the test frame with ascending row-major pixel values.
    video = []
    for y in range(IMAGE_IN_HEIGHT):
        for x in range(IMAGE_IN_WIDTH):
            video.append((IMAGE_IN_WIDTH * y) + x)
    dut.rst <= 1
    dut.test_id <= 2
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    # Stream master drives the input frame; stream slave captures the output.
    video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
    video_in = AXI4StreamSlave(dut, "AXISS", dut.clk, width=24)
    setup_dut(dut)
    # Pulse reset twice so the core starts from a known state.
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 1
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.log.info("Ready")
    cocotb.fork(axis_slave_listener(video_in))
    control = 0x02
    # Assert the controller reset bit (BIT_CONTROL_RESET)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Program the input/output frame geometry.
    yield axim.write(REG_VIDEO_IN_WIDTH, IMAGE_IN_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_HEIGHT, IMAGE_IN_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_SIZE, IMAGE_IN_SIZE)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_WIDTH, IMAGE_OUT_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_HEIGHT, IMAGE_OUT_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_SIZE, IMAGE_OUT_SIZE)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_START_X, IMAGE_IN_START_X)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_START_Y, IMAGE_IN_START_Y)
    yield Timer(CLK_PERIOD * 10)
    control = 0x01
    # Enable the controller (BIT_CONTROL_ENABLE)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Stream one frame in and give the core time to emit the output.
    yield video_out.write(video)
    yield Timer(CLK_PERIOD * 500)
@cocotb.test(skip = False)
def write_bottom_right_frame(dut):
    """
    Description:
        Configure a 3x3 input frame and a 2x2 output frame starting at
        (1, 1), then stream one frame in.
    Test ID: 3
    Expected Results:
        An output frame taken from the bottom-right of the input is
        expected on the output stream (observed via axis_slave_listener).
    """
    IMAGE_IN_WIDTH = 3
    IMAGE_IN_HEIGHT = 3
    IMAGE_IN_SIZE = IMAGE_IN_WIDTH * IMAGE_IN_HEIGHT
    IMAGE_OUT_WIDTH = 2
    IMAGE_OUT_HEIGHT = 2
    IMAGE_OUT_SIZE = IMAGE_OUT_WIDTH * IMAGE_OUT_HEIGHT
    IMAGE_IN_START_X = 1
    IMAGE_IN_START_Y = 1
    # Fill the test frame with ascending row-major pixel values.
    video = []
    for y in range(IMAGE_IN_HEIGHT):
        for x in range(IMAGE_IN_WIDTH):
            video.append((IMAGE_IN_WIDTH * y) + x)
    dut.rst <= 1
    dut.test_id <= 3
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    # Stream master drives the input frame; stream slave captures the output.
    video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
    video_in = AXI4StreamSlave(dut, "AXISS", dut.clk, width=24)
    setup_dut(dut)
    # Pulse reset twice so the core starts from a known state.
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 1
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.log.info("Ready")
    cocotb.fork(axis_slave_listener(video_in))
    control = 0x02
    # Assert the controller reset bit (BIT_CONTROL_RESET)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Program the input/output frame geometry.
    yield axim.write(REG_VIDEO_IN_WIDTH, IMAGE_IN_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_HEIGHT, IMAGE_IN_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_SIZE, IMAGE_IN_SIZE)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_WIDTH, IMAGE_OUT_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_HEIGHT, IMAGE_OUT_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_SIZE, IMAGE_OUT_SIZE)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_START_X, IMAGE_IN_START_X)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_START_Y, IMAGE_IN_START_Y)
    yield Timer(CLK_PERIOD * 10)
    control = 0x01
    # Enable the controller (BIT_CONTROL_ENABLE)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Stream one frame in and give the core time to emit the output.
    yield video_out.write(video)
    yield Timer(CLK_PERIOD * 500)
@cocotb.test(skip = False)
def write_center_frame(dut):
    """
    Description:
        Configure a 4x4 input frame and a 2x2 output frame starting at
        (1, 1), then stream one frame in.
    Test ID: 4
    Expected Results:
        An output frame taken from the center of the input is expected
        on the output stream (observed via axis_slave_listener).
    """
    # NOTE(review): unlike the other tests, test_id is assigned before the
    # reset sequence here; behavior preserved as-is.
    dut.test_id <= 4
    IMAGE_IN_WIDTH = 4
    IMAGE_IN_HEIGHT = 4
    IMAGE_IN_SIZE = IMAGE_IN_WIDTH * IMAGE_IN_HEIGHT
    IMAGE_OUT_WIDTH = 2
    IMAGE_OUT_HEIGHT = 2
    IMAGE_OUT_SIZE = IMAGE_OUT_WIDTH * IMAGE_OUT_HEIGHT
    IMAGE_IN_START_X = 1
    IMAGE_IN_START_Y = 1
    # Fill the test frame with ascending row-major pixel values.
    video = []
    for y in range(IMAGE_IN_HEIGHT):
        for x in range(IMAGE_IN_WIDTH):
            video.append((IMAGE_IN_WIDTH * y) + x)
    dut.rst <= 1
    axim = AXI4LiteMaster(dut, "AXIML", dut.clk)
    # Stream master drives the input frame; stream slave captures the output.
    video_out = AXI4StreamMaster(dut, "AXIMS", dut.clk, width=24)
    video_in = AXI4StreamSlave(dut, "AXISS", dut.clk, width=24)
    setup_dut(dut)
    # Pulse reset twice so the core starts from a known state.
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 1
    yield Timer(CLK_PERIOD * 10)
    dut.rst <= 0
    yield Timer(CLK_PERIOD * 10)
    dut.log.info("Ready")
    cocotb.fork(axis_slave_listener(video_in))
    control = 0x02
    # Assert the controller reset bit (BIT_CONTROL_RESET)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Program the input/output frame geometry.
    yield axim.write(REG_VIDEO_IN_WIDTH, IMAGE_IN_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_HEIGHT, IMAGE_IN_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_SIZE, IMAGE_IN_SIZE)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_WIDTH, IMAGE_OUT_WIDTH)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_HEIGHT, IMAGE_OUT_HEIGHT)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_OUT_SIZE, IMAGE_OUT_SIZE)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_START_X, IMAGE_IN_START_X)
    yield Timer(CLK_PERIOD * 10)
    yield axim.write(REG_VIDEO_IN_START_Y, IMAGE_IN_START_Y)
    yield Timer(CLK_PERIOD * 10)
    control = 0x01
    # Enable the controller (BIT_CONTROL_ENABLE)
    yield axim.write(REG_CONTROL, control)
    yield Timer(CLK_PERIOD * 10)
    # Stream one frame in and give the core time to emit the output.
    yield video_out.write(video)
    yield Timer(CLK_PERIOD * 500)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unpacks pre-built sanitizer-instrumented third-party libraries."""
import os
import subprocess
import shutil
import sys
import download_binaries
def get_archive_name(archive_prefix):
    """Return the .tgz archive filename for this Ubuntu release."""
    release = download_binaries.get_ubuntu_release()
    return '%s-%s.tgz' % (archive_prefix, release)
def main(archive_prefix, archive_dir, target_dir, stamp_dir=None):
    """Unpack the prebuilt archive into target_dir and write stamp files.

    Recreates target_dir, extracts <archive_dir>/<prefix>-<release>.tgz
    into it, touches a <prefix>.txt stamp, and (when stamp_dir is given)
    writes a <prefix>.d makefile-style dependency file.
    """
    shutil.rmtree(target_dir, ignore_errors=True)
    os.mkdir(target_dir)
    archive_path = os.path.join(archive_dir, get_archive_name(archive_prefix))
    subprocess.check_call(['tar', '-zxf', archive_path, '-C', target_dir])
    # Touch the stamp file so the build system can track freshness.
    stamp_file = os.path.join(stamp_dir or target_dir, '%s.txt' % archive_prefix)
    with open(stamp_file, 'w'):
        pass
    if stamp_dir:
        # Record the stamp -> archive dependency for incremental builds.
        dep_path = os.path.join(stamp_dir, '%s.d' % archive_prefix)
        with open(dep_path, 'w') as f:
            f.write('%s: %s' % (stamp_file, archive_path))
    return 0
# Command-line entry point: positional args are archive_prefix, archive_dir,
# target_dir and (optionally) stamp_dir; exits with main()'s return code.
if __name__ == '__main__':
    sys.exit(main(*sys.argv[1:]))
|
unknown
|
codeparrot/codeparrot-clean
| ||
import threading
import time
from unittest import mock
from multiple_database.routers import TestRouter
from django.core.exceptions import FieldError
from django.db import (
DatabaseError,
NotSupportedError,
connection,
connections,
router,
transaction,
)
from django.db.models import F, Value
from django.db.models.functions import Concat
from django.test import (
TransactionTestCase,
override_settings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import (
City,
CityCountryProxy,
Country,
EUCity,
EUCountry,
Person,
PersonProfile,
)
class SelectForUpdateTests(TransactionTestCase):
available_apps = ["select_for_update"]
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.country1 = Country.objects.create(name="Belgium")
self.country2 = Country.objects.create(name="France")
self.city1 = City.objects.create(name="Liberchies", country=self.country1)
self.city2 = City.objects.create(name="Samois-sur-Seine", country=self.country2)
self.person = Person.objects.create(
name="Reinhardt", born=self.city1, died=self.city2
)
self.person_profile = PersonProfile.objects.create(person=self.person)
# We need another database connection in transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
self.new_connection = connection.copy()
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = "SELECT * FROM %(db_table)s %(for_update)s;" % {
"db_table": Person._meta.db_table,
"for_update": self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.cursor.close()
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, queries, **kwargs):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = connection.ops.for_update_sql(**kwargs)
return any(for_update_sql in query["sql"] for query in queries)
@skipUnlessDBFeature("has_select_for_update")
def test_for_update_sql_generated(self):
"""
The backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.select_for_update())
self.assertTrue(self.has_for_update_sql(ctx.captured_queries))
@skipUnlessDBFeature("has_select_for_update_nowait")
def test_for_update_sql_generated_nowait(self):
"""
The backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, nowait=True))
@skipUnlessDBFeature("has_select_for_update_skip_locked")
def test_for_update_sql_generated_skip_locked(self):
"""
The backend's FOR UPDATE SKIP LOCKED variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.select_for_update(skip_locked=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, skip_locked=True))
@skipUnlessDBFeature("has_select_for_no_key_update")
def test_update_sql_generated_no_key(self):
"""
The backend's FOR NO KEY UPDATE variant appears in generated SQL when
select_for_update() is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.select_for_update(no_key=True))
self.assertIs(self.has_for_update_sql(ctx.captured_queries, no_key=True), True)
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_sql_generated_of(self):
"""
The backend's FOR UPDATE OF variant appears in the generated SQL when
select_for_update() is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(
Person.objects.select_related(
"born__country",
)
.select_for_update(
of=("born__country",),
)
.select_for_update(of=("self", "born__country"))
)
features = connections["default"].features
if features.select_for_update_of_column:
expected = [
'select_for_update_person"."id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ["select_for_update_person", "select_for_update_country"]
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_of_values_list(self):
queries = Person.objects.select_for_update(
of=("self",),
).values_list(Concat(Value("Dr. "), F("name")), "born")
with transaction.atomic():
values = queries.get(pk=self.person.pk)
self.assertSequenceEqual(values, ("Dr. Reinhardt", self.city1.pk))
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_sql_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=("self",)))
if connection.features.select_for_update_of_column:
expected = ['select_for_update_eucountry"."country_ptr_id']
else:
expected = ["select_for_update_eucountry"]
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_sql_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(
EUCountry.objects.select_for_update(
of=(
"self",
"country_ptr",
)
)
)
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucountry"."country_ptr_id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ["select_for_update_eucountry", "select_for_update_country"]
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_sql_related_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(
EUCity.objects.select_related("country").select_for_update(
of=("self", "country"),
)
)
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_eucountry"."country_ptr_id',
]
else:
expected = ["select_for_update_eucity", "select_for_update_eucountry"]
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_sql_model_inheritance_nested_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(
EUCity.objects.select_related("country").select_for_update(
of=(
"self",
"country__country_ptr",
),
)
)
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ["select_for_update_eucity", "select_for_update_country"]
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_sql_multilevel_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(
EUCountry.objects.select_for_update(
of=("country_ptr", "country_ptr__entity_ptr"),
)
)
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_country"."entity_ptr_id',
'select_for_update_entity"."id',
]
else:
expected = ["select_for_update_country", "select_for_update_entity"]
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_sql_model_proxy_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(
CityCountryProxy.objects.select_related("country").select_for_update(
of=("country",),
)
)
if connection.features.select_for_update_of_column:
expected = ['select_for_update_country"."entity_ptr_id']
else:
expected = ["select_for_update_country"]
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_of_followed_by_values(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=("self",)).values("pk"))
self.assertEqual(values, [{"pk": self.person.pk}])
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_of_followed_by_values_list(self):
with transaction.atomic():
values = list(
Person.objects.select_for_update(of=("self",)).values_list("pk")
)
self.assertEqual(values, [(self.person.pk,)])
@skipUnlessDBFeature("has_select_for_update_of")
def test_for_update_of_self_when_self_is_not_selected(self):
"""
select_for_update(of=['self']) when the only columns selected are from
related tables.
"""
with transaction.atomic():
values = list(
Person.objects.select_related("born")
.select_for_update(of=("self",))
.values("born__name")
)
self.assertEqual(values, [{"born__name": self.city1.name}])
@skipUnlessDBFeature(
"has_select_for_update_of",
"supports_select_for_update_with_limit",
)
def test_for_update_of_with_exists(self):
with transaction.atomic():
qs = Person.objects.select_for_update(of=("self", "born"))
self.assertIs(qs.exists(), True)
@skipUnlessDBFeature("has_select_for_update_nowait", "supports_transactions")
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={"nowait": True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature("has_select_for_update_skip_locked", "supports_transactions")
def test_skip_locked_skips_locked_rows(self):
"""
If skip_locked is specified, the locked row is skipped resulting in
Person.DoesNotExist.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={"skip_locked": True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], Person.DoesNotExist)
@skipIfDBFeature("has_select_for_update_nowait")
@skipUnlessDBFeature("has_select_for_update")
def test_unsupported_nowait_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE NOWAIT is run on
a database backend that supports FOR UPDATE but not NOWAIT.
"""
with self.assertRaisesMessage(
NotSupportedError, "NOWAIT is not supported on this database backend."
):
with transaction.atomic():
Person.objects.select_for_update(nowait=True).get()
@skipIfDBFeature("has_select_for_update_skip_locked")
@skipUnlessDBFeature("has_select_for_update")
def test_unsupported_skip_locked_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE SKIP LOCKED is run
on a database backend that supports FOR UPDATE but not SKIP LOCKED.
"""
with self.assertRaisesMessage(
NotSupportedError, "SKIP LOCKED is not supported on this database backend."
):
with transaction.atomic():
Person.objects.select_for_update(skip_locked=True).get()
@skipIfDBFeature("has_select_for_update_of")
@skipUnlessDBFeature("has_select_for_update")
def test_unsupported_of_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE OF... is run on
a database backend that supports FOR UPDATE but not OF.
"""
msg = "FOR UPDATE OF is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
Person.objects.select_for_update(of=("self",)).get()
@skipIfDBFeature("has_select_for_no_key_update")
@skipUnlessDBFeature("has_select_for_update")
def test_unsuported_no_key_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR NO KEY UPDATE... is run
on a database backend that supports FOR UPDATE but not NO KEY.
"""
msg = "FOR NO KEY UPDATE is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
Person.objects.select_for_update(no_key=True).get()
@skipUnlessDBFeature("has_select_for_update", "has_select_for_update_of")
def test_unrelated_of_argument_raises_error(self):
"""
FieldError is raised if a non-relation field is specified in of=(...).
"""
msg = (
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: self, born, born__country, "
"born__country__entity_ptr."
)
invalid_of = [
("nonexistent",),
("name",),
("born__nonexistent",),
("born__name",),
("born__nonexistent", "born__name"),
]
for of in invalid_of:
with self.subTest(of=of):
with self.assertRaisesMessage(FieldError, msg % ", ".join(of)):
with transaction.atomic():
Person.objects.select_related(
"born__country"
).select_for_update(of=of).get()
@skipUnlessDBFeature("has_select_for_update", "has_select_for_update_of")
def test_related_but_unselected_of_argument_raises_error(self):
"""
FieldError is raised if a relation field that is not followed in the
query is specified in of=(...).
"""
msg = (
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: self, born, profile."
)
for name in ["born__country", "died", "died__country"]:
with self.subTest(name=name):
with self.assertRaisesMessage(FieldError, msg % name):
with transaction.atomic():
Person.objects.select_related("born", "profile").exclude(
profile=None
).select_for_update(of=(name,)).get()
@skipUnlessDBFeature("has_select_for_update", "has_select_for_update_of")
def test_model_inheritance_of_argument_raises_error_ptr_in_choices(self):
msg = (
"Invalid field name(s) given in select_for_update(of=(...)): "
"name. Only relational fields followed in the query are allowed. "
"Choices are: self, %s."
)
with self.assertRaisesMessage(
FieldError,
msg % "country, country__country_ptr, country__country_ptr__entity_ptr",
):
with transaction.atomic():
EUCity.objects.select_related(
"country",
).select_for_update(of=("name",)).get()
with self.assertRaisesMessage(
FieldError, msg % "country_ptr, country_ptr__entity_ptr"
):
with transaction.atomic():
EUCountry.objects.select_for_update(of=("name",)).get()
@skipUnlessDBFeature("has_select_for_update", "has_select_for_update_of")
def test_model_proxy_of_argument_raises_error_proxy_field_in_choices(self):
msg = (
"Invalid field name(s) given in select_for_update(of=(...)): "
"name. Only relational fields followed in the query are allowed. "
"Choices are: self, country, country__entity_ptr."
)
with self.assertRaisesMessage(FieldError, msg):
with transaction.atomic():
CityCountryProxy.objects.select_related(
"country",
).select_for_update(of=("name",)).get()
@skipUnlessDBFeature("has_select_for_update", "has_select_for_update_of")
def test_reverse_one_to_one_of_arguments(self):
"""
Reverse OneToOneFields may be included in of=(...) as long as NULLs
are excluded because LEFT JOIN isn't allowed in SELECT FOR UPDATE.
"""
with transaction.atomic():
person = (
Person.objects.select_related(
"profile",
)
.exclude(profile=None)
.select_for_update(of=("profile",))
.get()
)
self.assertEqual(person.profile, self.person_profile)
@skipUnlessDBFeature("has_select_for_update")
def test_for_update_after_from(self):
features_class = connections["default"].features.__class__
attribute_to_patch = "%s.%s.for_update_after_from" % (
features_class.__module__,
features_class.__name__,
)
with mock.patch(attribute_to_patch, return_value=True):
with transaction.atomic():
self.assertIn(
"FOR UPDATE WHERE",
str(Person.objects.filter(name="foo").select_for_update().query),
)
@skipUnlessDBFeature("has_select_for_update", "supports_transactions")
def test_for_update_requires_transaction(self):
"""
A TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
msg = "select_for_update cannot be used outside of a transaction."
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(Person.objects.select_for_update())
@skipUnlessDBFeature("has_select_for_update", "supports_transactions")
def test_for_update_requires_transaction_only_in_execution(self):
"""
No TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.select_for_update()
msg = "select_for_update cannot be used outside of a transaction."
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(people)
@skipUnlessDBFeature("supports_select_for_update_with_limit")
def test_select_for_update_with_limit(self):
other = Person.objects.create(name="Grappeli", born=self.city1, died=self.city2)
with transaction.atomic():
qs = list(Person.objects.order_by("pk").select_for_update()[1:2])
self.assertEqual(qs[0], other)
@skipIfDBFeature("supports_select_for_update_with_limit")
def test_unsupported_select_for_update_with_limit(self):
msg = (
"LIMIT/OFFSET is not supported with select_for_update on this database "
"backend."
)
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
list(Person.objects.order_by("pk").select_for_update()[1:2])
def run_select_for_update(self, status, **kwargs):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append("started")
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
with transaction.atomic():
person = Person.objects.select_for_update(**kwargs).get()
person.name = "Fred"
person.save()
except (DatabaseError, Person.DoesNotExist) as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature("has_select_for_update")
@skipUnlessDBFeature("supports_transactions")
def test_block(self):
"""
A thread running a select_for_update that accesses rows being touched
by a similar operation on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(target=self.run_select_for_update, args=(status,))
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError("Thread did not run and block")
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual("Reinhardt", p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.is_alive())
# We must commit the transaction to ensure that MySQL gets a fresh
# read, since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual("Fred", p.name)
@skipUnlessDBFeature("has_select_for_update", "supports_transactions")
def test_raw_lock_not_available(self):
"""
Running a raw query which can't obtain a FOR UPDATE lock raises
the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
"SELECT * FROM %s %s"
% (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True),
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
# Connection cannot be closed on Oracle because cursor is still
# open.
if connection.vendor != "oracle":
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={"status": status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature("has_select_for_update")
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
    """A select_for_update() queryset is routed to the write database."""
    locked_qs = Person.objects.select_for_update()
    self.assertEqual(router.db_for_write(Person), locked_qs.db)
@skipUnlessDBFeature("has_select_for_update")
def test_select_for_update_with_get(self):
    """get() can be chained onto select_for_update() inside a transaction."""
    with transaction.atomic():
        fetched = Person.objects.select_for_update().get(name="Reinhardt")
        self.assertEqual(fetched.name, "Reinhardt")
def test_nowait_and_skip_locked(self):
    """nowait and skip_locked are mutually exclusive options."""
    expected_msg = "The nowait option cannot be used with skip_locked."
    with self.assertRaisesMessage(ValueError, expected_msg):
        Person.objects.select_for_update(nowait=True, skip_locked=True)
def test_ordered_select_for_update(self):
    """
    Subqueries should respect ordering as an ORDER BY clause may be useful
    to specify a row locking order to prevent deadlocks (#27193).
    """
    with transaction.atomic():
        # The ordered, locked queryset is used as a subquery; its ORDER BY
        # must survive into the generated SQL.
        inner = Person.objects.order_by("-id").select_for_update()
        outer = Person.objects.filter(id__in=inner)
        self.assertIn("ORDER BY", str(outer.query))
|
python
|
github
|
https://github.com/django/django
|
tests/select_for_update/tests.py
|
<!-- Trigger button: opens the top-level menu, anchored below the button. -->
<button ngMenuTrigger #origin #trigger="ngMenuTrigger" [menu]="formatMenu()">Open Menu</button>
<!-- Overlay hosting the top-level menu; shown while the trigger is expanded. -->
<ng-template
  [cdkConnectedOverlayOpen]="trigger.expanded()"
  [cdkConnectedOverlay]="{origin, usePopover: 'inline'}"
  [cdkConnectedOverlayPositions]="[
    {originX: 'start', originY: 'bottom', overlayX: 'start', overlayY: 'top', offsetY: 4},
  ]"
  cdkAttachPopoverAsChild
>
  <div ngMenu class="menu" #formatMenu="ngMenu">
    <ng-template ngMenuContent>
      <div ngMenuItem value="Mark as read">
        <span class="icon material-symbols-outlined" translate="no" aria-hidden="true"
          >mark_email_read</span
        >
        <span class="label">Mark as read</span>
      </div>
      <div ngMenuItem value="Snooze">
        <span class="icon material-symbols-outlined" translate="no" aria-hidden="true">snooze</span>
        <span class="label">Snooze</span>
      </div>
      <div role="separator" aria-orientation="horizontal" class="separator"></div>
      <!-- "Categorize" item owns a nested submenu (see overlay below). -->
      <div
        ngMenuItem
        class="menu-item"
        value="Categorize"
        #categorizeItem
        [submenu]="categorizeMenu()"
      >
        <span class="icon material-symbols-outlined" translate="no" aria-hidden="true"
          >category</span
        >
        <span class="label">Categorize</span>
        <span class="icon material-symbols-outlined arrow" translate="no" aria-hidden="true"
          >arrow_right</span
        >
      </div>
      <!-- Submenu overlay: anchored to the "Categorize" item, opening to its right. -->
      <ng-template
        [cdkConnectedOverlayOpen]="formatMenu.visible()"
        [cdkConnectedOverlay]="{origin: categorizeItem, usePopover: 'inline'}"
        [cdkConnectedOverlayPositions]="[
          {originX: 'end', originY: 'top', overlayY: 'top', overlayX: 'start', offsetX: 6},
        ]"
        cdkAttachPopoverAsChild
      >
        <div ngMenu class="menu" #categorizeMenu="ngMenu">
          <ng-template ngMenuContent>
            <div ngMenuItem value="Mark as important">
              <span class="icon material-symbols-outlined" translate="no" aria-hidden="true"
                >label_important</span
              >
              <span class="label">Mark as important</span>
            </div>
            <div ngMenuItem value="Star">
              <span class="icon material-symbols-outlined" translate="no" aria-hidden="true"
                >star</span
              >
              <span class="label">Star</span>
            </div>
            <div ngMenuItem value="Label">
              <span class="icon material-symbols-outlined" translate="no" aria-hidden="true"
                >label</span
              >
              <span class="label">Label</span>
            </div>
          </ng-template>
        </div>
      </ng-template>
      <div role="separator" aria-orientation="horizontal" class="separator"></div>
      <div ngMenuItem value="Archive">
        <span class="icon material-symbols-outlined" translate="no" aria-hidden="true"
          >archive</span
        >
        <span class="label">Archive</span>
      </div>
      <div ngMenuItem value="Report spam">
        <span class="icon material-symbols-outlined" translate="no" aria-hidden="true">report</span>
        <span class="label">Report spam</span>
      </div>
      <div ngMenuItem value="Delete">
        <span class="icon material-symbols-outlined" translate="no" aria-hidden="true">delete</span>
        <span class="label">Delete</span>
      </div>
    </ng-template>
  </div>
</ng-template>
|
html
|
github
|
https://github.com/angular/angular
|
adev/src/content/examples/aria/menu/src/menu-trigger/app/app.html
|
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sahara.plugins.mapr.base import base_version_handler as bvh
from sahara.plugins.mapr.services.drill import drill
from sahara.plugins.mapr.services.flume import flume
from sahara.plugins.mapr.services.hbase import hbase
from sahara.plugins.mapr.services.hive import hive
from sahara.plugins.mapr.services.httpfs import httpfs
from sahara.plugins.mapr.services.hue import hue
from sahara.plugins.mapr.services.impala import impala
from sahara.plugins.mapr.services.mahout import mahout
from sahara.plugins.mapr.services.management import management
from sahara.plugins.mapr.services.mapreduce import mapreduce
from sahara.plugins.mapr.services.maprfs import maprfs
from sahara.plugins.mapr.services.oozie import oozie
from sahara.plugins.mapr.services.pig import pig
from sahara.plugins.mapr.services.sqoop import sqoop2
from sahara.plugins.mapr.services.swift import swift
import sahara.plugins.mapr.versions.v4_0_1_mrv1.context as c
# MapR plugin version string this module handles (4.0.1, classic MRv1 stack).
version = '4.0.1.mrv1'
class VersionHandler(bvh.BaseVersionHandler):
    """Version handler for MapR 4.0.1 clusters running MapReduce v1."""

    def __init__(self):
        super(VersionHandler, self).__init__()
        # Plugin version this handler is responsible for.
        self._version = version
        # Services that every cluster of this version must include.
        self._required_services = [
            mapreduce.MapReduce(),
            maprfs.MapRFS(),
            management.Management(),
            oozie.Oozie(),
        ]
        # Full catalogue of services (with specific versions) that may be
        # deployed on this cluster version.
        self._services = [
            maprfs.MapRFS(),
            management.Management(),
            oozie.OozieV401(),
            hive.HiveV012(),
            hive.HiveV013(),
            hbase.HBaseV094(),
            hbase.HBaseV098(),
            httpfs.HttpFS(),
            mahout.Mahout(),
            pig.PigV013(),
            pig.PigV014(),
            swift.Swift(),
            mapreduce.MapReduce(),
            flume.Flume(),
            drill.Drill(),
            sqoop2.Sqoop2(),
            impala.ImpalaV123(),
            hue.Hue(),
        ]

    def get_context(self, cluster, added=None, removed=None):
        # Build a version-specific cluster context wrapping this handler.
        return c.Context(cluster, self, added, removed)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_util, h2o_print as h2p
import h2o_summ
print "Like test_summary_uniform, but with integers only"
print "focuses on numbers from 2B to 3B, which seem to have been dropped by another test?"

# Test configuration knobs.
DO_MEDIAN = False
DO_TRY_SCIPY = False
# Only attempt the scipy cross-check on machines known to have scipy installed.
if getpass.getuser()=='kevin' or getpass.getuser()=='jenkins':
    DO_TRY_SCIPY = True

# NOTE(review): the first assignment is dead code; MAX_QBINS is always
# 1000000. Presumably left from manual toggling during debugging.
MAX_QBINS = 1
MAX_QBINS = 1000000
# False: generate integer data rather than reals.
DO_REAL = False
def write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEED):
    """Write a synthetic CSV dataset of rowCount x colCount values.

    Every column in a row carries the same value, drawn uniformly from
    [expectedMin, expectedMax] (integers unless the module-level DO_REAL
    flag is set, in which case uniform reals are generated).

    Bug fix: the original created a seeded generator ``r1`` from SEED but
    then drew from the module-level ``random`` functions, so SEED had no
    effect and the dataset was not reproducible. Values are now drawn
    from ``r1``.
    """
    r1 = random.Random(SEED)
    expectedRange = (expectedMax - expectedMin)
    dsf = open(csvPathname, "w+")
    try:
        for i in range(rowCount):
            if DO_REAL:
                # Uniform real in [expectedMin, expectedMax).
                ri = expectedMin + (r1.random() * expectedRange)
            else:
                # Uniform integer in [expectedMin, expectedMax].
                ri = r1.randint(expectedMin, expectedMax)
            # Same value replicated across all columns of the row.
            rowData = [ri] * colCount
            dsf.write(",".join(map(str, rowData)) + "\n")
    finally:
        dsf.close()
class Basic(unittest.TestCase):
    """Summary2 smoke test over ~2.5B-2.6B integer values (single column)."""

    def tearDown(self):
        # Fail the test if the h2o sandbox logs contain errors.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Shared random seed for dataset generation; set once per class.
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_summary2_int2B(self):
        """Generate a uniform integer dataset in the 2B-3B range, parse it,
        run Summary, and sanity-check the reported percentiles."""
        SYNDATASETS_DIR = h2o.make_syn_dir()
        tryList = [
            # colname, (min, 25th, 50th, 75th, max)
            (100000, 1, 'B.hex', 2533255332, 2633256000, ('C1', None, None, None, None, None)),
        ]

        timeoutSecs = 10
        trial = 1
        n = h2o.nodes[0]
        lenNodes = len(h2o.nodes)

        x = 0
        timeoutSecs = 60
        for (rowCount, colCount, hex_key, expectedMin, expectedMax, expected) in tryList:
            # max error = half the bin size?
            maxDelta = ((expectedMax - expectedMin)/(MAX_QBINS + 0.0))
            # add 5% for fp errors?
            maxDelta = 1.05 * maxDelta
            # also need to add some variance due to random distribution?
            # maybe a percentage of the mean
            distMean = (expectedMax - expectedMin) / 2
            maxShift = distMean * .01
            maxDelta = maxDelta + maxShift

            SEEDPERFILE = random.randint(0, sys.maxint)
            x += 1

            csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
            csvPathname = SYNDATASETS_DIR + '/' + csvFilename

            print "Creating random", csvPathname
            write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEEDPERFILE)
            csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
            parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=60, doSummary=False)
            print "Parse result['destination_key']:", parseResult['destination_key']

            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
            print "\n" + csvFilename
            numRows = inspect["numRows"]
            numCols = inspect["numCols"]

            summaryResult = h2o_cmd.runSummary(key=hex_key, max_qbins=MAX_QBINS)
            h2o.verboseprint("summaryResult:", h2o.dump_json(summaryResult))

            # only one column
            column = summaryResult['summaries'][0]
            colname = column['colname']
            if expected[0]:
                self.assertEqual(colname, expected[0])

            coltype = column['type']
            nacnt = column['nacnt']
            stats = column['stats']
            stattype= stats['type']

            # FIX! we should compare mean and sd to expected?
            mean = stats['mean']
            sd = stats['sd']
            print "colname:", colname, "mean (2 places):", h2o_util.twoDecimals(mean)
            print "colname:", colname, "std dev. (2 places):", h2o_util.twoDecimals(sd)

            zeros = stats['zeros']
            mins = stats['mins']
            maxs = stats['maxs']
            pct = stats['pct']
            # the thresholds h2o used, should match what we expected
            expectedPct= [0.01, 0.05, 0.1, 0.25, 0.33, 0.5, 0.66, 0.75, 0.9, 0.95, 0.99]

            pctile = stats['pctile']
            # expected[1..5] are min/25/50/75/max; all None in tryList above,
            # so these approximate checks are currently skipped.
            if expected[1]:
                h2o_util.assertApproxEqual(mins[0], expected[1], tol=maxDelta, msg='min is not approx. expected')
                h2o_util.assertApproxEqual(pctile[3], expected[2], tol=maxDelta, msg='25th percentile is not approx. expected')
                h2o_util.assertApproxEqual(pctile[5], expected[3], tol=maxDelta, msg='50th percentile (median) is not approx. expected')
                h2o_util.assertApproxEqual(pctile[7], expected[4], tol=maxDelta, msg='75th percentile is not approx. expected')
                h2o_util.assertApproxEqual(maxs[0], expected[5], tol=maxDelta, msg='max is not approx. expected')

            hstart = column['hstart']
            hstep = column['hstep']
            hbrk = column['hbrk']
            hcnt = column['hcnt']

            print "pct:", pct
            print "hcnt:", hcnt
            print "len(hcnt)", len(hcnt)

            # don't check the last bin
            for b in hcnt[1:-1]:
                # should we be able to check for a uniform distribution in the files?
                e = numRows/len(hcnt) # expect 21 thresholds, so 20 bins. each 5% of rows (uniform distribution)
                # apparently we can't estimate any more
                # self.assertAlmostEqual(b, rowCount/len(hcnt), delta=.01*rowCount,
                #     msg="Bins not right. b: %s e: %s" % (b, e))

            pt = h2o_util.twoDecimals(pctile)
            mx = h2o_util.twoDecimals(maxs)
            mn = h2o_util.twoDecimals(mins)
            print "colname:", colname, "pctile (2 places):", pt
            print "colname:", colname, "maxs: (2 places):", mx
            print "colname:", colname, "mins: (2 places):", mn

            # FIX! we should do an exec and compare using the exec quantile too
            compareActual = mn[0], pt[3], pt[5], pt[7], mx[0]
            h2p.green_print("min/25/50/75/max colname:", colname, "(2 places):", compareActual)
            print "maxs colname:", colname, "(2 places):", mx
            print "mins colname:", colname, "(2 places):", mn

            trial += 1
            scipyCol = 0
# Allow running this test module directly.
if __name__ == '__main__':
    h2o.unit_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import logging
import requests
import json
class PassiveTotal(object):
    """Minimal client for the PassiveTotal REST API.

    Fixes relative to the original:
    - Constructing without an explicit ``api_version`` looked up a
      nonexistent ``"v1"`` entry in ``api_versions`` and raised
      ``KeyError``; the default is now ``"v2"``.
    - ``retrieve_data`` accessed the name-mangled attributes explicitly
      (``self._PassiveTotal__username``); plain ``self.__username`` is
      equivalent inside the class.
    - The query payload is built with ``json.dumps`` instead of string
      concatenation, so queries containing quotes produce valid JSON.
    - Both request methods return ``[]`` (instead of implicitly returning
      ``None``) when no API key is configured.
    """

    base_url = "https://api.passivetotal.org"
    headers = { 'Content-Type': 'application/json' }

    # Known API version path prefixes.
    api_versions = {"v2": "/v2",
                    "current": "/current"}

    # Resource name -> URL suffix for GET endpoints.
    GET_resources = {"metadata": "/metadata",
                     "passive": "/dns/passive",
                     "subdomains": "/subdomains",
                     "tags": "/user/tags",
                     "watch_status": "/watching",
                     "compromise_status": "/ever_compromised",
                     "dynamic_status": "/dynamic",
                     "sinkhole_status": "/sinkhole",
                     "classification": "/classification",
                     "ssl_cert_by_ip": "/ssl_certificate/ip_address",
                     "ssl_cert_by_hash": "/ssl_certificate/hash"}

    # Resource name -> URL suffix for POST endpoints.
    POST_resources = {"set_dynamic_status": "/dynamic",
                      "set_watch_status": "/watching",
                      "set_compromise_status": "/ever_compromised",
                      "add_tag": "/user/tag/add",
                      "remove_tag": "/user/tag/remove",
                      "set_classification": "/classification",
                      "set_sinkhole_status": "/sinkhole"}

    def __init__(self, api_username, api_key, api_version=None):
        """Store credentials and resolve the API version path prefix.

        :param api_username: account username used for HTTP basic auth
        :param api_key: API key; falsy key disables all requests
        :param api_version: optional version name ("v2" or "current");
            unrecognized or missing values default to "v2"
        """
        self.__key = api_key
        self.__username = api_username
        if api_version:
            try:
                self.api_version = self.api_versions[api_version]
            except KeyError:
                logging.warning("Unrecognized API version, defaulting to v2")
                self.api_version = self.api_versions["v2"]
        else:
            # Bug fix: the old default indexed a nonexistent "v1" entry,
            # raising KeyError for every caller that omitted api_version.
            self.api_version = self.api_versions["v2"]

    def retrieve_data(self, query, resource):
        """GET a resource and normalize passive-DNS style results.

        :param query: query term (e.g. a domain or IP)
        :param resource: key into ``GET_resources``
        :return: list of normalized result dicts; ``[]`` when the resource
            is unknown, the response is malformed, or no API key is set
        """
        if not self.__key:
            return []
        try:
            # json.dumps keeps the payload valid even if `query` contains
            # quotes or other JSON-significant characters.
            data_encode = json.dumps({"query": query}).encode('ascii')
            url = self.base_url + self.api_version + self.GET_resources[resource]
            response = requests.get(url, headers=self.headers, data=data_encode,
                                    auth=(self.__username, self.__key))
            records = json.loads(response.content.decode('utf-8'))['results']
            return [{
                'date': entry['collected'],
                'firstseen': entry['firstSeen'],
                'lastseen': entry['lastSeen'],
                'ip': entry['resolve'],
                'domain': entry['value'],
                'ip_location': {}
            } for entry in records]
        except KeyError:
            logging.warning("Unrecognized API resource or malformed query")
            return []

    def submit_data(self, query, resource):
        """POST to an action endpoint.

        :param query: query term the action applies to
        :param resource: key into ``POST_resources``
        :return: decoded JSON response; ``[]`` when the resource is unknown
            or no API key is set
        """
        if not self.__key:
            return []
        try:
            url = self.base_url + self.api_version + self.POST_resources[resource]
            params = {"api_key": self.__key, "query": query}
            response = requests.post(url, params=params)
            return json.loads(response.content)
        except KeyError:
            logging.warning("Unrecognized API resource or malformed query")
            return []
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2002-2005 Stephen Kennedy <stevek@gnome.org>
# Copyright (C) 2005 Aaron Bentley <aaron.bentley@utoronto.ca>
# Copyright (C) 2007 José Fonseca <j_r_fonseca@yahoo.co.uk>
# Copyright (C) 2010-2015 Kai Willadsen <kai.willadsen@gmail.com>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import errno
import io
import os
import re
import shutil
import stat
import tempfile
from collections import defaultdict
from meld.conf import _, ngettext
from . import _vc
NULL_SHA = "0000000000000000000000000000000000000000"
class Vc(_vc.Vc):
    """Git version-control adapter for Meld."""

    CMD = "git"
    NAME = "Git"
    VC_DIR = ".git"

    # One line of `git diff-index` / `git diff-files` raw output:
    # :old_mode new_mode old_sha new_sha state<TAB>path
    DIFF_FILES_RE = r":(\d+) (\d+) ([a-z0-9]+) ([a-z0-9]+) ([XADMTU])\t(.*)"
    DIFF_RE = re.compile(DIFF_FILES_RE)

    conflict_map = {
        # These are the arguments for git-show
        # CONFLICT_MERGED has no git-show argument unfortunately.
        _vc.CONFLICT_BASE: 1,
        _vc.CONFLICT_LOCAL: 2,
        _vc.CONFLICT_REMOTE: 3,
    }

    state_map = {
        "X": _vc.STATE_NONE,      # Unknown
        "A": _vc.STATE_NEW,       # New
        "D": _vc.STATE_REMOVED,   # Deleted
        "M": _vc.STATE_MODIFIED,  # Modified
        "T": _vc.STATE_MODIFIED,  # Type-changed
        "U": _vc.STATE_CONFLICT,  # Unmerged
    }

    @classmethod
    def is_installed(cls):
        # Probe by running `git --version` and checking its banner.
        try:
            proc = _vc.popen([cls.CMD, '--version'])
            assert proc.read().startswith('git version')
            return True
        except Exception:
            return False

    @classmethod
    def check_repo_root(cls, location):
        # Check exists instead of isdir, since .git might be a git-file
        return os.path.exists(os.path.join(location, cls.VC_DIR))

    def get_commits_to_push_summary(self):
        """Return a human-readable summary of unpushed commits, or ""."""
        branch_refs = self.get_commits_to_push()
        unpushed_branches = len([v for v in branch_refs.values() if v])
        unpushed_commits = sum(len(v) for v in branch_refs.values())
        if unpushed_commits:
            if unpushed_branches > 1:
                # Translators: First %s is replaced by translated "%d unpushed
                # commits", second %s is replaced by translated "%d branches"
                label = _("%s in %s") % (
                    ngettext("%d unpushed commit", "%d unpushed commits",
                             unpushed_commits) % unpushed_commits,
                    ngettext("%d branch", "%d branches",
                             unpushed_branches) % unpushed_branches)
            else:
                # Translators: These messages cover the case where there is
                # only one branch, and are not part of another message.
                label = ngettext("%d unpushed commit", "%d unpushed commits",
                                 unpushed_commits) % (unpushed_commits)
        else:
            label = ""
        return label

    def get_commits_to_push(self):
        """Map each local branch to the list of revisions not on its upstream."""
        proc = self.run(
            "for-each-ref", "--format=%(refname:short) %(upstream:short)",
            "refs/heads")
        branch_remotes = proc.stdout.read().split("\n")[:-1]
        branch_revisions = {}
        for line in branch_remotes:
            try:
                branch, remote = line.split()
            except ValueError:
                # Branch with no upstream configured; skip it.
                continue
            proc = self.run("rev-list", branch, "^" + remote, "--")
            revisions = proc.stdout.read().split("\n")[:-1]
            branch_revisions[branch] = revisions
        return branch_revisions

    def get_files_to_commit(self, paths):
        """Return repo-relative paths with changes under the given paths."""
        files = []
        for p in paths:
            if os.path.isdir(p):
                cached_entries, entries = self._get_modified_files(p)
                all_entries = set(entries + cached_entries)
                names = [
                    self.DIFF_RE.search(e).groups()[5] for e in all_entries
                ]
                files.extend(names)
            else:
                files.append(os.path.relpath(p, self.root))
        return sorted(list(set(files)))

    def get_commit_message_prefill(self):
        """Return a pregenerated merge commit message, if one exists."""
        commit_path = os.path.join(self.root, ".git", "MERGE_MSG")
        if os.path.exists(commit_path):
            # If I have to deal with non-ascii, non-UTF8 pregenerated commit
            # messages, I'm taking up pig farming.
            with open(commit_path, encoding='utf-8') as f:
                message = f.read()
            # Strip the commented-out hint lines git adds.
            return "\n".join(
                (l for l in message.splitlines() if not l.startswith("#")))
        return None

    def commit(self, runner, files, message):
        command = [self.CMD, 'commit', '-m', message]
        runner(command, files, refresh=True, working_dir=self.root)

    def update(self, runner):
        command = [self.CMD, 'pull']
        runner(command, [], refresh=True, working_dir=self.root)

    def push(self, runner):
        command = [self.CMD, 'push']
        runner(command, [], refresh=True, working_dir=self.root)

    def add(self, runner, files):
        command = [self.CMD, 'add']
        runner(command, files, refresh=True, working_dir=self.root)

    def remove(self, runner, files):
        command = [self.CMD, 'rm', '-r']
        runner(command, files, refresh=True, working_dir=self.root)

    def revert(self, runner, files):
        # Existing files are checked out from the index; deleted files need
        # an explicit HEAD so git restores their content.
        exists = [f for f in files if os.path.exists(f)]
        missing = [f for f in files if not os.path.exists(f)]
        if exists:
            command = [self.CMD, 'checkout']
            runner(command, exists, refresh=True, working_dir=self.root)
        if missing:
            command = [self.CMD, 'checkout', 'HEAD']
            runner(command, missing, refresh=True, working_dir=self.root)

    def resolve(self, runner, files):
        # Staging a conflicted file marks it as resolved in git.
        command = [self.CMD, 'add']
        runner(command, files, refresh=True, working_dir=self.root)

    def remerge_with_ancestor(self, local, base, remote):
        """Reconstruct a mixed merge-plus-base file

        This method re-merges a given file to get diff3-style conflicts
        which we can then use to get a file that contains the
        pre-merged result everywhere that has no conflict, and the
        common ancestor anywhere there *is* a conflict.
        """
        proc = self.run(
            "merge-file", "-p", "--diff3", local, base, remote,
            use_locale_encoding=False)
        vc_file = io.BytesIO(
            _vc.base_from_diff3(proc.stdout.read()))

        prefix = 'meld-tmp-%s-' % _vc.CONFLICT_MERGED
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as f:
            shutil.copyfileobj(vc_file, f)

        return f.name, True

    def get_path_for_conflict(self, path, conflict):
        """Return (filename, is_temp) for one side of a conflicted file."""
        if not path.startswith(self.root + os.path.sep):
            raise _vc.InvalidVCPath(self, path, "Path not in repository")

        if conflict == _vc.CONFLICT_MERGED:
            # Special case: no way to get merged result from git directly
            local, _ = self.get_path_for_conflict(path, _vc.CONFLICT_LOCAL)
            base, _ = self.get_path_for_conflict(path, _vc.CONFLICT_BASE)
            remote, _ = self.get_path_for_conflict(path, _vc.CONFLICT_REMOTE)

            if not (local and base and remote):
                raise _vc.InvalidVCPath(self, path,
                                        "Couldn't access conflict parents")

            filename, is_temp = self.remerge_with_ancestor(local, base, remote)

            for temp_file in (local, base, remote):
                if os.name == "nt":
                    # Temp files are read-only on Windows; make writable
                    # before removal.
                    os.chmod(temp_file, stat.S_IWRITE)
                os.remove(temp_file)

            return filename, is_temp

        path = path[len(self.root) + 1:]
        if os.name == "nt":
            path = path.replace("\\", "/")
        # :N:path selects the conflict stage (1=base, 2=local, 3=remote).
        args = ["git", "show", ":%s:%s" % (self.conflict_map[conflict], path)]
        filename = _vc.call_temp_output(
            args, cwd=self.location, file_id=_vc.conflicts[conflict])
        return filename, True

    def get_path_for_repo_file(self, path, commit=None):
        """Extract the committed version of a file to a temp file."""
        if commit is None:
            commit = "HEAD"
        else:
            raise NotImplementedError()

        if not path.startswith(self.root + os.path.sep):
            raise _vc.InvalidVCPath(self, path, "Path not in repository")
        path = path[len(self.root) + 1:]
        if os.name == "nt":
            path = path.replace("\\", "/")

        obj = commit + ":" + path
        args = [self.CMD, "cat-file", "blob", obj]
        return _vc.call_temp_output(args, cwd=self.root)

    @classmethod
    def valid_repo(cls, path):
        # TODO: On Windows, this exit code is wrong under the normal shell; it
        # appears to be correct under the default git bash shell however.
        return not _vc.call([cls.CMD, "branch"], cwd=path)

    def _get_modified_files(self, path):
        """Return (staged entries, unstaged entries) in raw diff format."""
        # Update the index to avoid reading stale status information
        proc = self.run("update-index", "--refresh")

        # Get status differences between the index and the repo HEAD
        proc = self.run("diff-index", "--cached", "HEAD", "--relative", path)
        cached_entries = proc.stdout.read().split("\n")[:-1]

        # Get status differences between the index and files-on-disk
        proc = self.run("diff-files", "-0", "--relative", path)
        entries = proc.stdout.read().split("\n")[:-1]

        # Files can show up in both lists, e.g., if a file is modified,
        # added to the index and changed again. This is okay, and in
        # fact the calling logic requires it for staging feedback.
        return cached_entries, entries

    def _update_tree_state_cache(self, path):
        """ Update the state of the file(s) at self._tree_cache['path'] """
        while 1:
            try:
                cached_entries, entries = self._get_modified_files(path)

                # Identify ignored files and folders
                proc = self.run(
                    "ls-files", "--others", "--ignored", "--exclude-standard",
                    "--directory", path)
                ignored_entries = proc.stdout.read().split("\n")[:-1]

                # Identify unversioned files
                proc = self.run(
                    "ls-files", "--others", "--exclude-standard", path)
                unversioned_entries = proc.stdout.read().split("\n")[:-1]
                break
            except OSError as e:
                # Retry only on EAGAIN (interrupted/busy subprocess pipe).
                if e.errno != errno.EAGAIN:
                    raise

        def get_real_path(name):
            name = name.strip()
            if os.name == 'nt':
                # Git returns unix-style paths on Windows
                name = os.path.normpath(name)

            # Unicode file names and file names containing quotes are
            # returned by git as quoted strings
            if name[0] == '"':
                name = name.encode('latin1')
                name = codecs.escape_decode(name[1:-1])[0].decode('utf-8')

            return os.path.abspath(
                os.path.join(self.location, name))

        if not cached_entries and not entries and os.path.isfile(path):
            # If we're just updating a single file there's a chance that it
            # was it was previously modified, and now has been edited so that
            # it is un-modified. This will result in an empty 'entries' list,
            # and self._tree_cache['path'] will still contain stale data.
            # When this corner case occurs we force self._tree_cache['path']
            # to STATE_NORMAL.
            self._tree_cache[get_real_path(path)] = _vc.STATE_NORMAL
        else:
            tree_meta_cache = defaultdict(list)
            staged = set()
            unstaged = set()

            # We iterate over both cached entries and entries, accumulating
            # metadata from both, but using the state from entries.
            for entry in cached_entries + entries:
                columns = self.DIFF_RE.search(entry).groups()
                old_mode, new_mode, old_sha, new_sha, statekey, path = columns
                state = self.state_map.get(statekey.strip(), _vc.STATE_NONE)
                path = get_real_path(path)
                self._tree_cache[path] = state
                # Git entries can't be MISSING; that's just an unstaged REMOVED
                self._add_missing_cache_entry(path, state)
                if old_mode != new_mode:
                    # NOTE(review): the %-substitution happens before the
                    # translation lookup here, so the formatted string will
                    # not match any translation catalogue entry — likely
                    # should be _("Mode changed from %s to %s") % (...).
                    msg = _("Mode changed from %s to %s" %
                            (old_mode, new_mode))
                    tree_meta_cache[path].append(msg)
                collection = unstaged if new_sha == NULL_SHA else staged
                collection.add(path)

            for path in staged:
                tree_meta_cache[path].append(
                    _("Partially staged") if path in unstaged else _("Staged"))

            for path, msgs in tree_meta_cache.items():
                self._tree_meta_cache[path] = "; ".join(msgs)

            for path in ignored_entries:
                self._tree_cache[get_real_path(path)] = _vc.STATE_IGNORED

            for path in unversioned_entries:
                self._tree_cache[get_real_path(path)] = _vc.STATE_NONE
|
unknown
|
codeparrot/codeparrot-clean
| ||
# https://github.com/dinoboff/github-tools/blob/master/src/github/tools/sphinx.py
#
# Copyright (c) 2009, Damien Lebrun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Modifications
# -------------
# Changed move_private_folders to better handle existing directories
"""
:Description: Sphinx extension to remove leading under-scores from directories names in the html build output directory.
"""
import os
import shutil
def setup(app):
    """
    Register this extension's Sphinx event handlers.

    'html-page-context' rewrites links into underscore-prefixed folders;
    'build-finished' renames those folders in the output directory.
    """
    handlers = (
        ('html-page-context', change_pathto),
        ('build-finished', move_private_folders),
    )
    for event, callback in handlers:
        app.connect(event, callback)
def change_pathto(app, pagename, templatename, context, doctree):
    """
    Wrap the template 'pathto' helper so links into folders with a leading
    underscore point at the underscore-less folder name instead.
    """
    original_pathto = context.get('pathto')

    def gh_pathto(otheruri, *args, **kw):
        # Strip a single leading underscore: '_static/x' -> 'static/x'.
        target = otheruri[1:] if otheruri.startswith('_') else otheruri
        return original_pathto(target, *args, **kw)

    context['pathto'] = gh_pathto
def move_private_folders(app, e):
    """
    Rename output sub-folders that start with an underscore, replacing any
    existing folder of the target name.

    :todo: should only affect html builds
    """
    outdir = app.builder.outdir
    for entry in os.listdir(outdir):
        source = os.path.join(outdir, entry)
        if not (entry.startswith('_') and os.path.isdir(source)):
            continue
        target = os.path.join(outdir, entry[1:])
        if os.path.isdir(target):
            # Clear out a stale copy so the move cannot fail.
            shutil.rmtree(target)
        shutil.move(source, target)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Select reactor
"""
from __future__ import division, absolute_import
from time import sleep
import sys, select, socket
from errno import EINTR, EBADF
from zope.interface import implementer
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import posixbase
from twisted.python import log
from twisted.python.runtime import platformType
def win32select(r, w, e, timeout=None):
    """Win32 select wrapper.

    Emulates the no-socket case with a plain sleep() and caps the wait so
    keyboard interrupts are still noticed.
    """
    if not r and not w:
        # windows select() exits immediately when no sockets, so emulate
        # the wait with a short sleep instead.
        sleep(0.01 if timeout is None else min(timeout, 0.001))
        return [], [], []
    # windows doesn't process 'signals' inside select(), so we set a max
    # time or ctrl-c will never be recognized
    if timeout is None or timeout > 0.5:
        timeout = 0.5
    readable, writable, errored = select.select(r, w, w, timeout)
    # NOTE(review): the write set doubles as the exceptfds set and errors
    # are folded into the writable result — presumably because Winsock
    # reports some write-side events there; confirm before changing.
    return readable, writable + errored, []
# Pick the select implementation: the signal-safe wrapper on Windows,
# the real select(2) everywhere else.
if platformType == "win32":
    _select = win32select
else:
    _select = select.select

try:
    # Optional mixin adding Win32 event support to the reactor; fall back
    # to a plain base class where it isn't available.
    from twisted.internet.win32eventreactor import _ThreadedWin32EventsMixin
except ImportError:
    _extraBase = object
else:
    _extraBase = _ThreadedWin32EventsMixin
@implementer(IReactorFDSet)
class SelectReactor(posixbase.PosixReactorBase, _extraBase):
"""
A select() based reactor - runs on all POSIX platforms and on Win32.
@ivar _reads: A set containing L{FileDescriptor} instances which will be
checked for read events.
@ivar _writes: A set containing L{FileDescriptor} instances which will be
checked for writability.
"""
def __init__(self):
"""
Initialize file descriptor tracking dictionaries and the base class.
"""
self._reads = set()
self._writes = set()
posixbase.PosixReactorBase.__init__(self)
def _preenDescriptors(self):
log.msg("Malformed file descriptor found. Preening lists.")
readers = list(self._reads)
writers = list(self._writes)
self._reads.clear()
self._writes.clear()
for selSet, selList in ((self._reads, readers),
(self._writes, writers)):
for selectable in selList:
try:
select.select([selectable], [selectable], [selectable], 0)
except Exception as e:
log.msg("bad descriptor %s" % selectable)
self._disconnectSelectable(selectable, e, False)
else:
selSet.add(selectable)
def doSelect(self, timeout):
"""
Run one iteration of the I/O monitor loop.
This will run all selectables who had input or output readiness
waiting for them.
"""
try:
r, w, ignored = _select(self._reads,
self._writes,
[], timeout)
except ValueError:
# Possibly a file descriptor has gone negative?
self._preenDescriptors()
return
except TypeError:
# Something *totally* invalid (object w/o fileno, non-integral
# result) was passed
log.err()
self._preenDescriptors()
return
except (select.error, socket.error, IOError) as se:
# select(2) encountered an error, perhaps while calling the fileno()
# method of a socket. (Python 2.6 socket.error is an IOError
# subclass, but on Python 2.5 and earlier it is not.)
if se.args[0] in (0, 2):
# windows does this if it got an empty list
if (not self._reads) and (not self._writes):
return
else:
raise
elif se.args[0] == EINTR:
return
elif se.args[0] == EBADF:
self._preenDescriptors()
return
else:
# OK, I really don't know what's going on. Blow up.
raise
_drdw = self._doReadOrWrite
_logrun = log.callWithLogger
for selectables, method, fdset in ((r, "doRead", self._reads),
(w,"doWrite", self._writes)):
for selectable in selectables:
# if this was disconnected in another thread, kill it.
# ^^^^ --- what the !@#*? serious! -exarkun
if selectable not in fdset:
continue
# This for pausing input when we're not ready for more.
_logrun(selectable, _drdw, selectable, method)
doIteration = doSelect
def _doReadOrWrite(self, selectable, method):
try:
why = getattr(selectable, method)()
except:
why = sys.exc_info()[1]
log.err()
if why:
self._disconnectSelectable(selectable, why, method=="doRead")
def addReader(self, reader):
"""
Add a FileDescriptor for notification of data available to read.
"""
self._reads.add(reader)
def addWriter(self, writer):
    """
    Register *writer* for notification when it can accept more data.
    Adding an already-registered writer is a no-op.
    """
    self._writes.add(writer)
def removeReader(self, reader):
    """
    Stop watching *reader* for readability.  A no-op if it was never
    registered (set.discard semantics).
    """
    self._reads.discard(reader)
def removeWriter(self, writer):
    """
    Stop watching *writer* for writability.  A no-op if it was never
    registered (set.discard semantics).
    """
    self._writes.discard(writer)
def removeAll(self):
    """
    Remove every registered reader and writer via the shared helper,
    returning whatever the helper returns.
    """
    readers, writers = self._reads, self._writes
    return self._removeAll(readers, writers)
def getReaders(self):
    """Return a snapshot list of all currently registered readers."""
    return [*self._reads]
def getWriters(self):
    """Return a snapshot list of all currently registered writers."""
    return [*self._writes]
def install():
    """Install a select()-based reactor as the global Twisted reactor."""
    reactor = SelectReactor()
    # Imported lazily, matching the original, so that merely importing this
    # module does not pull in the reactor-installation machinery.
    from twisted.internet.main import installReactor
    installReactor(reactor)


__all__ = ['install']
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*!
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
/**
 * Message identifiers exchanged with the TypeScript virtual-file-system (VFS)
 * worker.  Each value is the string tag carried by a worker message.
 */
export const enum TsVfsWorkerActions {
  /** Worker signals that the default file-system map is ready. */
  INIT_DEFAULT_FILE_SYSTEM_MAP = 'default-fs-ready',
  /** Ask the worker to create a VFS environment. */
  CREATE_VFS_ENV_REQUEST = 'create-vfs-env-request',
  /** Worker reply once the VFS environment has been created. */
  CREATE_VFS_ENV_RESPONSE = 'create-vfs-env-response',
  /** Notify the worker that the editor buffer changed. */
  CODE_CHANGED = 'code-changed',
  /** Ask the worker to update an existing VFS environment. */
  UPDATE_VFS_ENV_REQUEST = 'update-vfs-env-request',
  /** Request autocomplete entries at a position. */
  AUTOCOMPLETE_REQUEST = 'autocomplete-request',
  /** Worker reply carrying autocomplete entries. */
  AUTOCOMPLETE_RESPONSE = 'autocomplete-response',
  /** Request diagnostics for the current code. */
  DIAGNOSTICS_REQUEST = 'diagnostics-request',
  /** Worker reply carrying diagnostics. */
  DIAGNOSTICS_RESPONSE = 'diagnostics-response',
  /** Ask the worker to register additional type definitions. */
  DEFINE_TYPES_REQUEST = 'define-types-request',
  /** Request hover/tooltip information at a position. */
  DISPLAY_TOOLTIP_REQUEST = 'display-tooltip-request',
  /** Worker reply carrying hover/tooltip information. */
  DISPLAY_TOOLTIP_RESPONSE = 'display-tooltip-response',
}
|
typescript
|
github
|
https://github.com/angular/angular
|
adev/src/app/editor/code-editor/workers/enums/actions.ts
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
GridAverage.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
# Plugin root directory: two directory levels above this module's folder.
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class GridAverage(GdalAlgorithm):
    """Processing wrapper around GDAL's ``gdal_grid`` moving-average
    interpolation (``-a average``)."""

    # Parameter identifiers.
    INPUT = 'INPUT'
    Z_FIELD = 'Z_FIELD'
    RADIUS_1 = 'RADIUS_1'
    RADIUS_2 = 'RADIUS_2'
    MIN_POINTS = 'MIN_POINTS'
    ANGLE = 'ANGLE'
    NODATA = 'NODATA'
    OPTIONS = 'OPTIONS'
    EXTRA = 'EXTRA'
    DATA_TYPE = 'DATA_TYPE'
    OUTPUT = 'OUTPUT'

    # Output raster data types, indexed by the DATA_TYPE enum parameter.
    TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare all input/output parameters of the algorithm."""
        advanced = QgsProcessingParameterDefinition.FlagAdvanced

        self.addParameter(QgsProcessingParameterFeatureSource(
            self.INPUT,
            self.tr('Point layer'),
            [QgsProcessing.TypeVectorPoint]))

        z_field = QgsProcessingParameterField(self.Z_FIELD,
                                              self.tr('Z value from field'),
                                              None,
                                              self.INPUT,
                                              QgsProcessingParameterField.Numeric,
                                              optional=True)
        z_field.setFlags(z_field.flags() | advanced)
        self.addParameter(z_field)

        # Both search-ellipse radii share the same numeric constraints.
        for name, description in (
                (self.RADIUS_1, self.tr('The first radius of search ellipse')),
                (self.RADIUS_2, self.tr('The second radius of search ellipse'))):
            self.addParameter(QgsProcessingParameterNumber(
                name, description,
                type=QgsProcessingParameterNumber.Double,
                minValue=0.0,
                defaultValue=0.0))

        self.addParameter(QgsProcessingParameterNumber(
            self.ANGLE,
            self.tr('Angle of search ellipse rotation in degrees (counter clockwise)'),
            type=QgsProcessingParameterNumber.Double,
            minValue=0.0,
            maxValue=360.0,
            defaultValue=0.0))
        self.addParameter(QgsProcessingParameterNumber(
            self.MIN_POINTS,
            self.tr('Minimum number of data points to use'),
            type=QgsProcessingParameterNumber.Integer,
            minValue=0,
            defaultValue=0))
        self.addParameter(QgsProcessingParameterNumber(
            self.NODATA,
            self.tr('NODATA marker to fill empty points'),
            type=QgsProcessingParameterNumber.Double,
            defaultValue=0.0))

        opts = QgsProcessingParameterString(self.OPTIONS,
                                            self.tr('Additional creation options'),
                                            defaultValue='',
                                            optional=True)
        opts.setFlags(opts.flags() | advanced)
        opts.setMetadata({
            'widget_wrapper': {
                'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
        self.addParameter(opts)

        extra = QgsProcessingParameterString(self.EXTRA,
                                             self.tr('Additional command-line parameters'),
                                             defaultValue=None,
                                             optional=True)
        extra.setFlags(extra.flags() | advanced)
        self.addParameter(extra)

        dtype = QgsProcessingParameterEnum(self.DATA_TYPE,
                                           self.tr('Output data type'),
                                           self.TYPES,
                                           allowMultiple=False,
                                           defaultValue=5)
        dtype.setFlags(dtype.flags() | advanced)
        self.addParameter(dtype)

        self.addParameter(QgsProcessingParameterRasterDestination(
            self.OUTPUT,
            self.tr('Interpolated (moving average)')))

    def name(self):
        return 'gridaverage'

    def displayName(self):
        return self.tr('Grid (Moving average)')

    def group(self):
        return self.tr('Raster analysis')

    def groupId(self):
        return 'rasteranalysis'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'grid.png'))

    def commandName(self):
        return 'gdal_grid'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Build the gdal_grid command line for the given parameter values."""
        ogr_layer, layer_name = self.getOgrCompatibleSource(
            self.INPUT, parameters, context, feedback, executing)

        args = ['-l', layer_name]

        field_name = self.parameterAsString(parameters, self.Z_FIELD, context)
        if field_name:
            args += ['-zfield', field_name]

        # The '-a' algorithm string packs all moving-average settings.
        algorithm = 'average:radius1={}:radius2={}:angle={}:min_points={}:nodata={}'.format(
            self.parameterAsDouble(parameters, self.RADIUS_1, context),
            self.parameterAsDouble(parameters, self.RADIUS_2, context),
            self.parameterAsDouble(parameters, self.ANGLE, context),
            self.parameterAsInt(parameters, self.MIN_POINTS, context),
            self.parameterAsDouble(parameters, self.NODATA, context))
        args += ['-a', algorithm,
                 '-ot', self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)]]

        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        self.setOutputValue(self.OUTPUT, out)
        args += ['-of', QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1])]

        options = self.parameterAsString(parameters, self.OPTIONS, context)
        if options:
            args.extend(GdalUtils.parseCreationOptions(options))

        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            args.append(self.parameterAsString(parameters, self.EXTRA, context))

        args += [ogr_layer, out]
        return [self.commandName(), GdalUtils.escapeAndJoin(args)]
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php
namespace Illuminate\Contracts\Debug;
/**
 * Marker interface with no methods.
 *
 * NOTE(review): based on the name, exceptions implementing this contract are
 * presumably excluded from exception reporting — confirm against the
 * framework's exception handler, which is not visible here.
 */
interface ShouldntReport
{
    //
}
|
php
|
github
|
https://github.com/laravel/framework
|
src/Illuminate/Contracts/Debug/ShouldntReport.php
|
"""
Interface to Constrained Optimization By Linear Approximation
Functions
---------
.. autosummary::
:toctree: generated/
fmin_cobyla
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import callable
from scipy.optimize import _cobyla
from .optimize import OptimizeResult, _check_unknown_options
__all__ = ['fmin_cobyla']
def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
                rhoend=1e-4, iprint=1, maxfun=1000, disp=None, catol=2e-4):
    """
    Minimize a function using the Constrained Optimization BY Linear
    Approximation (COBYLA) method. This method wraps a FORTRAN
    implementation of the algorithm.

    Parameters
    ----------
    func : callable
        Function to minimize. In the form func(x, \\*args).
    x0 : ndarray
        Initial guess.
    cons : sequence
        Constraint functions; must all be ``>=0`` (a single function
        if only 1 constraint). Each function takes the parameters `x`
        as its first argument.
    args : tuple
        Extra arguments to pass to function.
    consargs : tuple
        Extra arguments to pass to constraint functions (default of None means
        use same extra arguments as those passed to func).
        Use ``()`` for no extra arguments.
    rhobeg :
        Reasonable initial changes to the variables.
    rhoend :
        Final accuracy in the optimization (not precisely guaranteed). This
        is a lower bound on the size of the trust region.
    iprint : {0, 1, 2, 3}
        Controls the frequency of output; 0 implies no output. Deprecated.
    disp : {0, 1, 2, 3}
        Over-rides the iprint interface. Preferred.
    maxfun : int
        Maximum number of function evaluations.
    catol : float
        Absolute tolerance for constraint violations.

    Returns
    -------
    x : ndarray
        The argument that minimises `f`.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'COBYLA' `method` in particular.

    Notes
    -----
    This algorithm is based on linear approximations to the objective
    function and each constraint. We briefly describe the algorithm.

    Suppose the function is being minimized over k variables. At the
    jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
    an approximate solution x_j, and a radius RHO_j. The algorithm
    maintains linear (i.e. linear plus a constant) approximations to the
    objective function and constraint functions such that their function
    values agree with the linear approximation on the k+1 points
    v_1,.., v_(k+1).  This gives a linear program to solve (where the
    linear approximations of the constraint functions are constrained
    to be non-negative).

    However the linear approximations are likely only good
    approximations near the current simplex, so the linear program is
    given the further requirement that the solution, which
    will become x_(j+1), must be within RHO_j from x_j. RHO_j only
    decreases, never increases. The initial RHO_j is rhobeg and the
    final RHO_j is rhoend. In this way COBYLA's iterations behave
    like a trust region algorithm.

    Additionally, the linear program may be inconsistent, or the
    approximation may give poor improvement. For details about
    how these issues are resolved, as well as how the points v_i are
    updated, refer to the source code or the references below.

    References
    ----------
    Powell M.J.D. (1994), "A direct search optimization method that models
    the objective and constraint functions by linear interpolation.", in
    Advances in Optimization and Numerical Analysis, eds. S. Gomez and
    J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67

    Powell M.J.D. (1998), "Direct search algorithms for optimization
    calculations", Acta Numerica 7, 287-336

    Powell M.J.D. (2007), "A view of algorithms for optimization without
    derivatives", Cambridge University Technical Report DAMTP 2007/NA03

    Examples
    --------
    Minimize the objective function f(x,y) = x*y subject
    to the constraints x**2 + y**2 < 1 and y > 0::

        >>> def objective(x):
        ...     return x[0]*x[1]
        ...
        >>> def constr1(x):
        ...     return 1 - (x[0]**2 + x[1]**2)
        ...
        >>> def constr2(x):
        ...     return x[1]
        ...
        >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)

           Normal return from subroutine COBYLA

           NFVALS =   64   F =-5.000000E-01    MAXCV = 1.998401E-14
           X =-7.071069E-01   7.071067E-01
        array([-0.70710685,  0.70710671])

    The exact solution is (-sqrt(2)/2, sqrt(2)/2).
    """
    err = "cons must be a sequence of callable functions or a single"\
          " callable function."
    # Accept either a sized sequence of constraint callables or a single
    # callable (which is wrapped in a one-element list).
    try:
        len(cons)
    except TypeError:
        if not callable(cons):
            raise TypeError(err)
        cons = [cons]
    else:
        for this_func in cons:
            if not callable(this_func):
                raise TypeError(err)

    if consargs is None:
        # By default constraint functions get the same extra args as func.
        consargs = args

    # Translate each constraint into the dict form used by _minimize_cobyla.
    con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)

    # `disp`, when given, overrides the deprecated `iprint` knob.
    if disp is not None:
        iprint = disp

    sol = _minimize_cobyla(func, x0, args, constraints=con,
                           rhobeg=rhobeg, tol=rhoend, iprint=iprint,
                           disp=iprint != 0, maxiter=maxfun, catol=catol)
    if iprint > 0 and not sol['success']:
        print("COBYLA failed to find a solution: %s" % (sol.message,))
    return sol['x']
def _minimize_cobyla(fun, x0, args=(), constraints=(),
                     rhobeg=1.0, tol=1e-4, iprint=1, maxiter=1000,
                     disp=False, catol=2e-4, **unknown_options):
    """
    Minimize a scalar function of one or more variables using the
    Constrained Optimization BY Linear Approximation (COBYLA) algorithm.

    Options for the COBYLA algorithm are:
        rhobeg : float
            Reasonable initial changes to the variables.
        tol : float
            Final accuracy in the optimization (not precisely guaranteed).
            This is a lower bound on the size of the trust region.
        disp : bool
            Set to True to print convergence messages. If False,
            `verbosity` is ignored as set to 0.
        maxiter : int
            Maximum number of function evaluations.
        catol : float
            Tolerance (absolute) for constraint violations

    This function is called by the `minimize` function with
    `method=COBYLA`. It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    # Map the generic option names onto the Fortran routine's vocabulary.
    maxfun = maxiter
    rhoend = tol
    if not disp:
        # disp=False silences the Fortran routine regardless of iprint.
        iprint = 0
    # check constraints
    if isinstance(constraints, dict):
        # A single constraint dict is accepted and normalized to a tuple.
        constraints = (constraints, )
    for ic, con in enumerate(constraints):
        # check type
        try:
            ctype = con['type'].lower()
        except KeyError:
            raise KeyError('Constraint %d has no type defined.' % ic)
        except TypeError:
            raise TypeError('Constraints must be defined using a '
                            'dictionary.')
        except AttributeError:
            raise TypeError("Constraint's type must be a string.")
        else:
            # COBYLA handles only inequality ('ineq') constraints.
            if ctype != 'ineq':
                raise ValueError("Constraints of type '%s' not handled by "
                                 "COBYLA." % con['type'])
        # check function
        if 'fun' not in con:
            raise KeyError('Constraint %d has no function defined.' % ic)
        # check extra arguments
        # NOTE(review): this writes into the caller-supplied constraint
        # dict in place -- confirm callers do not rely on their dicts
        # staying untouched.
        if 'args' not in con:
            con['args'] = ()
    m = len(constraints)
    def calcfc(x, con):
        # Callback handed to the Fortran code: fill `con` with the
        # constraint values at x and return the objective value.
        f = fun(x, *args)
        for k, c in enumerate(constraints):
            con[k] = c['fun'](x, *c['args'])
        return f
    # info slots as filled by the Fortran routine:
    # [exit status, nfev, final objective value, max constraint violation].
    info = np.zeros(4, np.float64)
    xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
                                  rhoend=rhoend, iprint=iprint, maxfun=maxfun,
                                  dinfo=info)
    if info[3] > catol:
        # Check constraint violation
        info[0] = 4
    return OptimizeResult(x=xopt,
                          status=int(info[0]),
                          success=info[0] == 1,
                          message={1: 'Optimization terminated successfully.',
                                   2: 'Maximum number of function evaluations has '
                                      'been exceeded.',
                                   3: 'Rounding errors are becoming damaging in '
                                      'COBYLA subroutine.',
                                   4: 'Did not converge to a solution satisfying '
                                      'the constraints. See `maxcv` for magnitude '
                                      'of violation.'
                                   }.get(info[0], 'Unknown exit status.'),
                          nfev=int(info[1]),
                          fun=info[2],
                          maxcv=info[3])
if __name__ == '__main__':
    from math import sqrt

    # Smoke test: minimize x*y on the closed unit disk.
    def objective(x):
        return x[0] * x[1]

    def unit_disk(x):
        return 1 - x[0]**2 - x[1]**2

    x = fmin_cobyla(objective, [1., 1.], unit_disk, iprint=3, disp=1)
    print('\nTheoretical solution: %e, %e' % (1. / sqrt(2.), -1. / sqrt(2.)))
|
unknown
|
codeparrot/codeparrot-clean
| ||
from twilio.rest.base import TwilioClient
from twilio.rest.resources import (
UNSET_TIMEOUT,
Accounts,
Applications,
AuthorizedConnectApps,
CallFeedback,
CallFeedbackFactory,
CallerIds,
Calls,
Conferences,
ConnectApps,
DependentPhoneNumbers,
MediaList,
Members,
Messages,
Notifications,
Participants,
PhoneNumbers,
Queues,
Recordings,
Sandboxes,
Sip,
Sms,
Tokens,
Transcriptions,
Usage,
)
class TwilioRestClient(TwilioClient):
    """
    A client for accessing the Twilio REST API

    :param str account: Your Account SID from `your dashboard
        <https://twilio.com/user/account>`_
    :param str token: Your Auth Token from `your dashboard
        <https://twilio.com/user/account>`_
    :param float timeout: The socket and read timeout for requests to Twilio
    """

    def __init__(self, account=None, token=None, base="https://api.twilio.com",
                 version="2010-04-01", timeout=UNSET_TIMEOUT):
        """
        Create a Twilio REST API client.
        """
        super(TwilioRestClient, self).__init__(account, token, base, version,
                                               timeout)
        version_uri = "%s/%s" % (base, version)

        # The account list lives at the API-version root, not under an
        # individual account.
        self.accounts = Accounts(version_uri, self.auth, timeout)

        # Every other resource is scoped to the authenticated account.
        account_scoped = (
            ('applications', Applications),
            ('authorized_connect_apps', AuthorizedConnectApps),
            ('calls', Calls),
            ('caller_ids', CallerIds),
            ('connect_apps', ConnectApps),
            ('notifications', Notifications),
            ('recordings', Recordings),
            ('transcriptions', Transcriptions),
            ('sms', Sms),
            ('phone_numbers', PhoneNumbers),
            ('conferences', Conferences),
            ('queues', Queues),
            ('sandboxes', Sandboxes),
            ('usage', Usage),
            ('messages', Messages),
            ('media', MediaList),
            ('sip', Sip),
            ('tokens', Tokens),
        )
        for attr, resource_class in account_scoped:
            setattr(self, attr,
                    resource_class(self.account_uri, self.auth, timeout))

    def participants(self, conference_sid):
        """
        Return a :class:`~twilio.rest.resources.Participants` instance for the
        :class:`~twilio.rest.resources.Conference` with given conference_sid
        """
        uri = "%s/Conferences/%s" % (self.account_uri, conference_sid)
        return Participants(uri, self.auth, self.timeout)

    def members(self, queue_sid):
        """
        Return a :class:`Members <twilio.rest.resources.Members>` instance for
        the :class:`Queue <twilio.rest.resources.Queue>` with the
        given queue_sid
        """
        uri = "%s/Queues/%s" % (self.account_uri, queue_sid)
        return Members(uri, self.auth, self.timeout)

    def feedback(self, call_sid):
        """
        Return a :class:`CallFeedback <twilio.rest.resources.CallFeedback>`
        instance for the :class:`Call <twilio.rest.resources.calls.Call>`
        with the given call_sid
        """
        uri = "%s/Calls/%s/Feedback" % (self.account_uri, call_sid)
        factory = CallFeedbackFactory(uri, self.auth, self.timeout)
        return CallFeedback(factory)

    def dependent_phone_numbers(self, address_sid):
        """
        Return a :class:`DependentPhoneNumbers
        <twilio.rest.resources.DependentPhoneNumbers>` instance for the
        :class:`Address <twilio.rest.resources.Address>` with the given
        address_sid
        """
        uri = "%s/Addresses/%s" % (self.account_uri, address_sid)
        return DependentPhoneNumbers(uri, self.auth, self.timeout)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# sqlite/base.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite
:name: SQLite
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out of the box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQlite is used. The implementation classes are
:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing no alpha characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
.. seealso::
`Type Affinity <http://www.sqlite.org/datatype3.html#affinity>`_ - in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among
other things, this means that any type name which contains the
substring ``"INT"`` will be determined to be of "integer affinity". A
type named ``"BIGINT"``, ``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by
SQLite to be of "integer" affinity. However, **the SQLite
autoincrement feature, whether implicitly or explicitly enabled,
requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an
application uses a type like :class:`.BigInteger` for a primary key, on
SQLite this type will need to be rendered as the name ``"INTEGER"`` when
emitting the initial ``CREATE TABLE`` statement in order for the autoincrement
behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL name
to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <http://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQlite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<http://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level
----------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the `PRAGMA read_uncommitted <http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`.create_engine.isolation_level` parameter of :func:`.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also is ending any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
http://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`~.types.BIGINT`, :class:`~.types.BLOB`,
:class:`~.types.BOOLEAN`, :class:`~.types.BOOLEAN`,
:class:`~.types.CHAR`, :class:`~.types.DATE`,
:class:`~.types.DATETIME`, :class:`~.types.FLOAT`,
:class:`~.types.DECIMAL`, :class:`~.types.FLOAT`,
:class:`~.types.INTEGER`, :class:`~.types.INTEGER`,
:class:`~.types.NUMERIC`, :class:`~.types.REAL`,
:class:`~.types.SMALLINT`, :class:`~.types.TEXT`,
:class:`~.types.TIME`, :class:`~.types.TIMESTAMP`,
:class:`~.types.VARCHAR`, :class:`~.types.NVARCHAR`,
:class:`~.types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`~.types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`~.types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`~.types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`~.types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`~.types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is generally a bad idea for relational
databases in general, as the dot is a syntactically significant character,
the SQLite driver up until version **3.10.0** of SQLite has a bug which
requires that SQLAlchemy filter out these dots in result sets.
.. note::
The following SQLite issue has been resolved as of version 3.10.0
of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \\
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.execute("create table x (a integer, b integer)")
conn.execute("insert into x (a, b) values (1, 1)")
conn.execute("insert into x (a, b) values (2, 2)")
result = conn.execute("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API.
In the very specific case where
an application is forced to use column names that contain dots, and the
functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()`
is required to return these dotted names unmodified, the ``sqlite_raw_colnames``
execution option may be provided, either on a per-:class:`.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
"""
import datetime
import re
from ... import processors
from ... import sql, exc
from ... import types as sqltypes, schema as sa_schema
from ... import util
from ...engine import default, reflection
from ...sql import compiler
from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT,
INTEGER, REAL, NUMERIC, SMALLINT, TEXT,
TIMESTAMP, VARCHAR)
class _DateTimeMixin(object):
    """Mixin supplying ``storage_format`` / ``regexp`` configuration for the
    string-based SQLite date and time types.

    ``_storage_format`` is a %-style format string rendered against a dict
    of date/time components; ``_reg`` is an optional compiled regexp used
    to parse incoming result rows.
    """

    _reg = None
    _storage_format = None

    def __init__(self, storage_format=None, regexp=None, **kw):
        super(_DateTimeMixin, self).__init__(**kw)
        if storage_format is not None:
            self._storage_format = storage_format
        if regexp is not None:
            self._reg = re.compile(regexp)

    @property
    def format_is_text_affinity(self):
        """Return True if the storage format will automatically imply
        a TEXT affinity.

        If the storage format contains no non-numeric characters,
        it will imply a NUMERIC storage format on SQLite; in this case,
        the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
        TIME_CHAR.

        .. versionadded:: 1.0.0
        """
        zeroed = dict.fromkeys(
            ("year", "month", "day", "hour",
             "minute", "second", "microsecond"), 0)
        rendered = self._storage_format % zeroed
        # any non-digit character in the zero-rendered format forces TEXT
        return bool(re.search(r'[^0-9]', rendered))

    def adapt(self, cls, **kw):
        # carry the custom format/regexp over when adapting to another
        # _DateTimeMixin-based type
        if issubclass(cls, _DateTimeMixin):
            for key, value in (("storage_format", self._storage_format),
                               ("regexp", self._reg)):
                if value:
                    kw[key] = value
        return super(_DateTimeMixin, self).adapt(cls, **kw)

    def literal_processor(self, dialect):
        # render the bound-parameter string inline, single-quoted
        render = self.bind_processor(dialect)

        def process(value):
            return "'%s'" % render(value)

        return process
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
    """Represent a Python datetime object in SQLite using a string.

    The default string storage format is::

        "%(year)04d-%(month)02d-%(day)02d "
        "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

    e.g.::

        2011-03-15 12:05:57.105558

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import DATETIME

        dt = DATETIME(
            storage_format="%(year)04d/%(month)02d/%(day)02d "
                           "%(hour)02d:%(minute)02d:%(second)02d",
            regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)"
        )

    :param storage_format: format string which will be applied to the dict
     with keys year, month, day, hour, minute, second, and microsecond.

    :param regexp: regular expression which will be applied to incoming result
     rows. If the regexp contains named groups, the resulting match dict is
     applied to the Python datetime() constructor as keyword arguments.
     Otherwise, if positional groups are used, the datetime() constructor
     is called with positional arguments via
     ``*map(int, match_obj.groups(0))``.

    :param truncate_microseconds: when True, use the default storage format
     with the microseconds portion omitted; mutually exclusive with
     ``storage_format`` and ``regexp``.
    """

    # default render: "YYYY-MM-DD HH:MM:SS.ffffff"
    _storage_format = (
        "%(year)04d-%(month)02d-%(day)02d "
        "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
    )

    def __init__(self, *args, **kwargs):
        truncate_microseconds = kwargs.pop('truncate_microseconds', False)
        super(DATETIME, self).__init__(*args, **kwargs)
        if truncate_microseconds:
            assert 'storage_format' not in kwargs, "You can specify only "\
                "one of truncate_microseconds or storage_format."
            assert 'regexp' not in kwargs, "You can specify only one of "\
                "truncate_microseconds or regexp."
            # default format minus the ".%(microsecond)06d" portion
            self._storage_format = (
                "%(year)04d-%(month)02d-%(day)02d "
                "%(hour)02d:%(minute)02d:%(second)02d"
            )

    def bind_processor(self, dialect):
        """Return a bound-parameter converter: datetime/date -> string."""
        datetime_datetime = datetime.datetime
        datetime_date = datetime.date
        format = self._storage_format

        def process(value):
            if value is None:
                return None
            elif isinstance(value, datetime_datetime):
                return format % {
                    'year': value.year,
                    'month': value.month,
                    'day': value.day,
                    'hour': value.hour,
                    'minute': value.minute,
                    'second': value.second,
                    'microsecond': value.microsecond,
                }
            elif isinstance(value, datetime_date):
                # plain dates render with a zeroed time portion
                return format % {
                    'year': value.year,
                    'month': value.month,
                    'day': value.day,
                    'hour': 0,
                    'minute': 0,
                    'second': 0,
                    'microsecond': 0,
                }
            else:
                raise TypeError("SQLite DateTime type only accepts Python "
                                "datetime and date objects as input.")
        return process

    def result_processor(self, dialect, coltype):
        """Return a result-row converter: string -> datetime.datetime."""
        if self._reg:
            return processors.str_to_datetime_processor_factory(
                self._reg, datetime.datetime)
        else:
            return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
    """Represent a Python date object in SQLite using a string.

    The default string storage format is::

        "%(year)04d-%(month)02d-%(day)02d"

    e.g.::

        2011-03-15

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import DATE

        d = DATE(
            storage_format="%(month)02d/%(day)02d/%(year)04d",
            regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
        )

    :param storage_format: format string which will be applied to the
     dict with keys year, month, and day.

    :param regexp: regular expression which will be applied to
     incoming result rows. If the regexp contains named groups, the
     resulting match dict is applied to the Python date() constructor
     as keyword arguments. Otherwise, if positional groups are used, the
     date() constructor is called with positional arguments via
     ``*map(int, match_obj.groups(0))``.
    """

    _storage_format = "%(year)04d-%(month)02d-%(day)02d"

    def bind_processor(self, dialect):
        """Return a bound-parameter converter: date -> string."""
        date_cls = datetime.date
        template = self._storage_format

        def process(value):
            if value is None:
                return None
            if not isinstance(value, date_cls):
                raise TypeError("SQLite Date type only accepts Python "
                                "date objects as input.")
            return template % {
                'year': value.year,
                'month': value.month,
                'day': value.day,
            }

        return process

    def result_processor(self, dialect, coltype):
        """Return a result-row converter: string -> datetime.date."""
        if self._reg is None:
            return processors.str_to_date
        return processors.str_to_datetime_processor_factory(
            self._reg, datetime.date)
class TIME(_DateTimeMixin, sqltypes.Time):
    """Represent a Python time object in SQLite using a string.

    The default string storage format is::

        "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

    e.g.::

        12:05:57.105558

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import TIME

        t = TIME(
            storage_format="%(hour)02d-%(minute)02d-"
                           "%(second)02d-%(microsecond)06d",
            regexp=re.compile("(\d+)-(\d+)-(\d+)(?:-(\d+))?")
        )

    :param storage_format: format string which will be applied to the dict
     with keys hour, minute, second, and microsecond.

    :param regexp: regular expression which will be applied to incoming result
     rows. If the regexp contains named groups, the resulting match dict is
     applied to the Python time() constructor as keyword arguments. Otherwise,
     if positional groups are used, the time() constructor is called with
     positional arguments via ``*map(int, match_obj.groups(0))``.

    :param truncate_microseconds: when True, use the default storage format
     with the microseconds portion omitted; mutually exclusive with
     ``storage_format`` and ``regexp``.
    """

    _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

    def __init__(self, *args, **kwargs):
        truncate_microseconds = kwargs.pop('truncate_microseconds', False)
        super(TIME, self).__init__(*args, **kwargs)
        if truncate_microseconds:
            assert 'storage_format' not in kwargs, (
                "You can specify only one of truncate_microseconds"
                " or storage_format.")
            assert 'regexp' not in kwargs, (
                "You can specify only one of truncate_microseconds"
                " or regexp.")
            # default format minus the microseconds portion
            self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"

    def bind_processor(self, dialect):
        """Return a bound-parameter converter: time -> string."""
        time_cls = datetime.time
        template = self._storage_format

        def process(value):
            if value is None:
                return None
            if not isinstance(value, time_cls):
                raise TypeError("SQLite Time type only accepts Python "
                                "time objects as input.")
            return template % {
                'hour': value.hour,
                'minute': value.minute,
                'second': value.second,
                'microsecond': value.microsecond,
            }

        return process

    def result_processor(self, dialect, coltype):
        """Return a result-row converter: string -> datetime.time."""
        if self._reg is None:
            return processors.str_to_time
        return processors.str_to_datetime_processor_factory(
            self._reg, datetime.time)
# Map generic SQLAlchemy date/time types to the SQLite string-based
# implementations defined above.
colspecs = {
    sqltypes.Date: DATE,
    sqltypes.DateTime: DATETIME,
    sqltypes.Time: TIME,
}

# Type names encountered during reflection, mapped to SQLAlchemy types.
# The *_CHAR names are the DDL spellings emitted by SQLiteTypeCompiler
# for date/time storage formats that imply TEXT affinity.
ischema_names = {
    'BIGINT': sqltypes.BIGINT,
    'BLOB': sqltypes.BLOB,
    'BOOL': sqltypes.BOOLEAN,
    'BOOLEAN': sqltypes.BOOLEAN,
    'CHAR': sqltypes.CHAR,
    'DATE': sqltypes.DATE,
    'DATE_CHAR': sqltypes.DATE,
    'DATETIME': sqltypes.DATETIME,
    'DATETIME_CHAR': sqltypes.DATETIME,
    'DOUBLE': sqltypes.FLOAT,
    'DECIMAL': sqltypes.DECIMAL,
    'FLOAT': sqltypes.FLOAT,
    'INT': sqltypes.INTEGER,
    'INTEGER': sqltypes.INTEGER,
    'NUMERIC': sqltypes.NUMERIC,
    'REAL': sqltypes.REAL,
    'SMALLINT': sqltypes.SMALLINT,
    'TEXT': sqltypes.TEXT,
    'TIME': sqltypes.TIME,
    'TIME_CHAR': sqltypes.TIME,
    'TIMESTAMP': sqltypes.TIMESTAMP,
    'VARCHAR': sqltypes.VARCHAR,
    'NVARCHAR': sqltypes.NVARCHAR,
    'NCHAR': sqltypes.NCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
    """SQL statement compiler for the SQLite dialect."""

    # EXTRACT field names -> strftime() format codes
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'day': '%d',
            'dow': '%w',
            'doy': '%j',
            'epoch': '%s',
            'hour': '%H',
            'minute': '%M',
            'month': '%m',
            'second': '%S',
            'week': '%W',
            'year': '%Y',
        })

    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"

    def visit_localtimestamp_func(self, func, **kw):
        return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'

    # SQLite has no boolean literals; render integers
    def visit_true(self, expr, **kw):
        return '1'

    def visit_false(self, expr, **kw):
        return '0'

    def visit_char_length_func(self, fn, **kw):
        return "length%s" % self.function_argspec(fn)

    def visit_cast(self, cast, **kwargs):
        # older SQLite versions lack CAST; just render the inner clause
        if not self.dialect.supports_cast:
            return self.process(cast.clause, **kwargs)
        return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)

    def visit_extract(self, extract, **kw):
        try:
            return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
                self.extract_map[extract.field],
                self.process(extract.expr, **kw),
            )
        except KeyError:
            raise exc.CompileError(
                "%s is not a valid extract argument." % extract.field)

    def limit_clause(self, select, **kw):
        parts = []
        if select._limit_clause is not None:
            parts.append(
                "\n LIMIT " + self.process(select._limit_clause, **kw))
        if select._offset_clause is not None:
            if select._limit_clause is None:
                # OFFSET requires LIMIT; -1 means "no limit" in SQLite
                parts.append("\n LIMIT " + self.process(sql.literal(-1)))
            parts.append(
                " OFFSET " + self.process(select._offset_clause, **kw))
        else:
            parts.append(" OFFSET " + self.process(sql.literal(0), **kw))
        return "".join(parts)

    def for_update_clause(self, select, **kw):
        # sqlite has no "FOR UPDATE" AFAICT
        return ''
class SQLiteDDLCompiler(compiler.DDLCompiler):
    """DDL compiler for the SQLite dialect."""

    def get_column_specification(self, column, **kwargs):
        rendered_type = self.dialect.type_compiler.process(
            column.type, type_expression=column)
        tokens = [self.preparer.format_column(column), rendered_type]

        default = self.get_column_default_string(column)
        if default is not None:
            tokens.append("DEFAULT " + default)

        if not column.nullable:
            tokens.append("NOT NULL")

        # single-column integer PK with sqlite_autoincrement renders the
        # PRIMARY KEY AUTOINCREMENT inline with the column
        if (column.primary_key and
                column.table.dialect_options['sqlite']['autoincrement'] and
                len(column.table.primary_key.columns) == 1 and
                issubclass(column.type._type_affinity, sqltypes.Integer) and
                not column.foreign_keys):
            tokens.append("PRIMARY KEY AUTOINCREMENT")

        return " ".join(tokens)

    def visit_primary_key_constraint(self, constraint):
        # for columns with sqlite_autoincrement=True,
        # the PRIMARY KEY constraint can only be inline
        # with the column itself.
        if len(constraint.columns) == 1:
            col = list(constraint)[0]
            rendered_inline = (
                col.primary_key and
                col.table.dialect_options['sqlite']['autoincrement'] and
                issubclass(col.type._type_affinity, sqltypes.Integer) and
                not col.foreign_keys)
            if rendered_inline:
                return None
        return super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
            constraint)

    def visit_foreign_key_constraint(self, constraint):
        element = constraint.elements[0]
        # SQLite can't reference a table in another schema; omit the FK
        if element.parent.table.schema != element.column.table.schema:
            return None
        return super(
            SQLiteDDLCompiler,
            self).visit_foreign_key_constraint(constraint)

    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""
        return preparer.format_table(table, use_schema=False)

    def visit_create_index(self, create):
        index = create.element
        text = super(SQLiteDDLCompiler, self).visit_create_index(
            create, include_table_schema=False)

        # render a partial index when sqlite_where= was given
        whereclause = index.dialect_options["sqlite"]["where"]
        if whereclause is not None:
            text += " WHERE " + self.sql_compiler.process(
                whereclause, include_table=False, literal_binds=True)
        return text
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
    """Type compiler for the SQLite dialect.

    The date/time types render as *_CHAR when a custom storage format
    would otherwise imply NUMERIC affinity, forcing TEXT affinity.
    """

    def visit_large_binary(self, type_, **kw):
        return self.visit_BLOB(type_)

    def visit_DATETIME(self, type_, **kw):
        if isinstance(type_, _DateTimeMixin) and \
                not type_.format_is_text_affinity:
            return "DATETIME_CHAR"
        return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)

    def visit_DATE(self, type_, **kw):
        if isinstance(type_, _DateTimeMixin) and \
                not type_.format_is_text_affinity:
            return "DATE_CHAR"
        return super(SQLiteTypeCompiler, self).visit_DATE(type_)

    def visit_TIME(self, type_, **kw):
        if isinstance(type_, _DateTimeMixin) and \
                not type_.format_is_text_affinity:
            return "TIME_CHAR"
        return super(SQLiteTypeCompiler, self).visit_TIME(type_)
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer for the SQLite dialect.

    Overrides the reserved-word list with SQLite's keywords and adjusts
    index formatting for SQLite's schema-qualification rules.
    """

    # SQLite keywords; identifiers matching these are always quoted.
    reserved_words = set([
        'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
        'attach', 'autoincrement', 'before', 'begin', 'between', 'by',
        'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit',
        'conflict', 'constraint', 'create', 'cross', 'current_date',
        'current_time', 'current_timestamp', 'database', 'default',
        'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',
        'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
        'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
        'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
        'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect',
        'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit',
        'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on',
        'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query',
        'raise', 'references', 'reindex', 'rename', 'replace', 'restrict',
        'right', 'rollback', 'row', 'select', 'set', 'table', 'temp',
        'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union',
        'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual',
        'when', 'where',
    ])

    def format_index(self, index, use_schema=True, name=None):
        """Prepare a quoted index and schema name.

        In SQLite the index lives in the same schema as its table, so the
        schema prefix is taken from ``index.table`` rather than the index.
        """
        if name is None:
            name = index.name
        result = self.quote(name, index.quote)
        if (not self.omit_schema and
                use_schema and
                getattr(index.table, "schema", None)):
            result = self.quote_schema(
                index.table.schema, index.table.quote_schema) + "." + result
        return result
class SQLiteExecutionContext(default.DefaultExecutionContext):
    """Execution context handling SQLite's dotted-column-name quirk."""

    @util.memoized_property
    def _preserve_raw_colnames(self):
        # honor the "sqlite_raw_colnames" execution option
        return self.execution_options.get("sqlite_raw_colnames", False)

    def _translate_colname(self, colname):
        # TODO: detect SQLite version 3.10.0 or greater;
        # see [ticket:3633]

        # adjust for dotted column names. SQLite
        # in the case of UNION may store col names as
        # "tablename.colname", or if using an attached database,
        # "database.tablename.colname", in cursor.description
        if self._preserve_raw_colnames or "." not in colname:
            return colname, None
        return colname.rsplit(".", 1)[1], colname
class SQLiteDialect(default.DefaultDialect):
    """SQLAlchemy dialect for SQLite.

    Capability flags declared at class level are refined in ``__init__``
    based on the actual SQLite library version reported by the DBAPI.
    """

    name = 'sqlite'
    supports_alter = False
    supports_unicode_statements = True
    supports_unicode_binds = True
    supports_default_values = True
    supports_empty_insert = False
    supports_cast = True
    supports_multivalues_insert = True

    # TODO: detect version 3.7.16 or greater;
    # see [ticket:3634]
    supports_right_nested_joins = False

    default_paramstyle = 'qmark'
    execution_ctx_cls = SQLiteExecutionContext
    statement_compiler = SQLiteCompiler
    ddl_compiler = SQLiteDDLCompiler
    type_compiler = SQLiteTypeCompiler
    preparer = SQLiteIdentifierPreparer
    ischema_names = ischema_names
    colspecs = colspecs
    isolation_level = None

    construct_arguments = [
        (sa_schema.Table, {
            "autoincrement": False
        }),
        (sa_schema.Index, {
            "where": None,
        }),
    ]

    # set True for SQLite < 3.6.14, whose foreign_key_list PRAGMA leaves
    # quote characters on the referenced table name
    _broken_fk_pragma_quotes = False

    def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
        default.DefaultDialect.__init__(self, **kwargs)
        self.isolation_level = isolation_level

        # this flag used by pysqlite dialect, and perhaps others in the
        # future, to indicate the driver is handling date/timestamp
        # conversions (and perhaps datetime/time as well on some hypothetical
        # driver ?)
        self.native_datetime = native_datetime

        if self.dbapi is not None:
            self.supports_default_values = (
                self.dbapi.sqlite_version_info >= (3, 3, 8))
            self.supports_cast = (
                self.dbapi.sqlite_version_info >= (3, 2, 3))
            self.supports_multivalues_insert = (
                # http://www.sqlite.org/releaselog/3_7_11.html
                self.dbapi.sqlite_version_info >= (3, 7, 11))
            # see http://www.sqlalchemy.org/trac/ticket/2568
            # as well as http://www.sqlite.org/src/info/600482d161
            self._broken_fk_pragma_quotes = (
                self.dbapi.sqlite_version_info < (3, 6, 14))

    _isolation_lookup = {
        'READ UNCOMMITTED': 1,
        'SERIALIZABLE': 0,
    }

    def set_isolation_level(self, connection, level):
        """Set the isolation level via PRAGMA read_uncommitted.

        :raises exc.ArgumentError: if ``level`` is not a recognized name.
        """
        try:
            isolation_level = self._isolation_lookup[level.replace('_', ' ')]
        except KeyError:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s" %
                (level, self.name, ", ".join(self._isolation_lookup))
            )
        cursor = connection.cursor()
        cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
        cursor.close()

    def get_isolation_level(self, connection):
        """Return the current isolation level name for ``connection``."""
        cursor = connection.cursor()
        cursor.execute('PRAGMA read_uncommitted')
        res = cursor.fetchone()
        if res:
            value = res[0]
        else:
            # http://www.sqlite.org/changes.html#version_3_3_3
            # "Optional READ UNCOMMITTED isolation (instead of the
            # default isolation level of SERIALIZABLE) and
            # table level locking when database connections
            # share a common cache.""
            # pre-SQLite 3.3.0 default to 0
            value = 0
        cursor.close()
        if value == 0:
            return "SERIALIZABLE"
        elif value == 1:
            return "READ UNCOMMITTED"
        else:
            assert False, "Unknown isolation level %s" % value

    def on_connect(self):
        """Return a connect handler applying the isolation level, if set."""
        if self.isolation_level is not None:
            def connect(conn):
                self.set_isolation_level(conn, self.isolation_level)
            return connect
        else:
            return None

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return table names, optionally from an attached ``schema``."""
        if schema is not None:
            qschema = self.identifier_preparer.quote_identifier(schema)
            master = '%s.sqlite_master' % qschema
        else:
            master = "sqlite_master"
        s = ("SELECT name FROM %s "
             "WHERE type='table' ORDER BY name") % (master,)
        rs = connection.execute(s)
        return [row[0] for row in rs]

    @reflection.cache
    def get_temp_table_names(self, connection, **kw):
        """Return names of temporary tables."""
        s = "SELECT name FROM sqlite_temp_master "\
            "WHERE type='table' ORDER BY name "
        rs = connection.execute(s)
        return [row[0] for row in rs]

    @reflection.cache
    def get_temp_view_names(self, connection, **kw):
        """Return names of temporary views."""
        s = "SELECT name FROM sqlite_temp_master "\
            "WHERE type='view' ORDER BY name "
        rs = connection.execute(s)
        return [row[0] for row in rs]

    def has_table(self, connection, table_name, schema=None):
        """Return True if ``table_name`` exists (table_info returns rows)."""
        info = self._get_table_pragma(
            connection, "table_info", table_name, schema=schema)
        return bool(info)

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """Return view names, optionally from an attached ``schema``."""
        if schema is not None:
            qschema = self.identifier_preparer.quote_identifier(schema)
            master = '%s.sqlite_master' % qschema
        else:
            master = "sqlite_master"
        s = ("SELECT name FROM %s "
             "WHERE type='view' ORDER BY name") % (master,)
        rs = connection.execute(s)
        return [row[0] for row in rs]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the CREATE VIEW statement text for ``view_name``."""
        if schema is not None:
            qschema = self.identifier_preparer.quote_identifier(schema)
            master = '%s.sqlite_master' % qschema
            # note: a space is required before AND; the two adjacent string
            # literals previously concatenated to "...'%s'AND ..."
            s = ("SELECT sql FROM %s WHERE name = '%s' "
                 "AND type='view'") % (master, view_name)
            rs = connection.execute(s)
        else:
            try:
                s = ("SELECT sql FROM "
                     " (SELECT * FROM sqlite_master UNION ALL "
                     "  SELECT * FROM sqlite_temp_master) "
                     "WHERE name = '%s' "
                     "AND type='view'") % view_name
                rs = connection.execute(s)
            except exc.DBAPIError:
                s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
                     "AND type='view'") % view_name
                rs = connection.execute(s)

        result = rs.fetchall()
        if result:
            return result[0].sql

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Return column reflection dicts via PRAGMA table_info."""
        info = self._get_table_pragma(
            connection, "table_info", table_name, schema=schema)

        columns = []
        for row in info:
            # table_info rows: (cid, name, type, notnull, dflt_value, pk)
            (name, type_, nullable, default, primary_key) = (
                row[1], row[2].upper(), not row[3], row[4], row[5])

            columns.append(self._get_column_info(name, type_, nullable,
                                                 default, primary_key))
        return columns

    def _get_column_info(self, name, type_, nullable, default, primary_key):
        """Build a single column reflection dict."""
        coltype = self._resolve_type_affinity(type_)

        if default is not None:
            default = util.text_type(default)

        return {
            'name': name,
            'type': coltype,
            'nullable': nullable,
            'default': default,
            'autoincrement': default is None,
            'primary_key': primary_key,
        }

    def _resolve_type_affinity(self, type_):
        """Return a data type from a reflected column, using affinity rules.

        SQLite's goal for universal compatibility introduces some complexity
        during reflection, as a column's defined type might not actually be a
        type that SQLite understands - or indeed, may not be defined *at all*.
        Internally, SQLite handles this with a 'data type affinity' for each
        column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
        'REAL', or 'NONE' (raw bits). The algorithm that determines this is
        listed in http://www.sqlite.org/datatype3.html section 2.1.

        This method allows SQLAlchemy to support that algorithm, while still
        providing access to smarter reflection utilities by recognizing
        column definitions that SQLite only supports through affinity (like
        DATE and DOUBLE).
        """
        match = re.match(r'([\w ]+)(\(.*?\))?', type_)
        if match:
            coltype = match.group(1)
            args = match.group(2)
        else:
            coltype = ''
            args = ''

        if coltype in self.ischema_names:
            coltype = self.ischema_names[coltype]
        elif 'INT' in coltype:
            coltype = sqltypes.INTEGER
        elif 'CHAR' in coltype or 'CLOB' in coltype or 'TEXT' in coltype:
            coltype = sqltypes.TEXT
        elif 'BLOB' in coltype or not coltype:
            coltype = sqltypes.NullType
        elif 'REAL' in coltype or 'FLOA' in coltype or 'DOUB' in coltype:
            coltype = sqltypes.REAL
        else:
            coltype = sqltypes.NUMERIC

        if args is not None:
            # instantiate with any length/precision args, e.g. VARCHAR(30)
            args = re.findall(r'(\d+)', args)
            try:
                coltype = coltype(*[int(a) for a in args])
            except TypeError:
                util.warn(
                    "Could not instantiate type %s with "
                    "reflected arguments %s; using no arguments." %
                    (coltype, args))
                coltype = coltype()
        else:
            coltype = coltype()

        return coltype

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Return primary key column names (SQLite PKs are unnamed)."""
        cols = self.get_columns(connection, table_name, schema, **kw)
        pkeys = []
        for col in cols:
            if col['primary_key']:
                pkeys.append(col['name'])
        return {'constrained_columns': pkeys, 'name': None}

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Return foreign key dicts, recovering constraint names by
        re-parsing the table's original CREATE statement.
        """
        # sqlite makes this *extremely difficult*.
        # First, use the pragma to get the actual FKs.
        pragma_fks = self._get_table_pragma(
            connection, "foreign_key_list",
            table_name, schema=schema
        )

        fks = {}

        for row in pragma_fks:
            (numerical_id, rtbl, lcol, rcol) = (
                row[0], row[2], row[3], row[4])

            if rcol is None:
                # no remote column given; FK references the remote PK
                rcol = lcol

            if self._broken_fk_pragma_quotes:
                rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl)

            if numerical_id in fks:
                fk = fks[numerical_id]
            else:
                fk = fks[numerical_id] = {
                    'name': None,
                    'constrained_columns': [],
                    'referred_schema': None,
                    'referred_table': rtbl,
                    'referred_columns': [],
                }
            fk['constrained_columns'].append(lcol)
            fk['referred_columns'].append(rcol)

        def fk_sig(constrained_columns, referred_table, referred_columns):
            return tuple(constrained_columns) + (referred_table,) + \
                tuple(referred_columns)

        # then, parse the actual SQL and attempt to find DDL that matches
        # the names as well. SQLite saves the DDL in whatever format
        # it was typed in as, so need to be liberal here.

        keys_by_signature = dict(
            (
                fk_sig(
                    fk['constrained_columns'],
                    fk['referred_table'], fk['referred_columns']),
                fk
            ) for fk in fks.values()
        )

        table_data = self._get_table_sql(connection, table_name, schema=schema)
        if table_data is None:
            # system tables, etc.
            return []

        def parse_fks():
            FK_PATTERN = (
                '(?:CONSTRAINT (\w+) +)?'
                'FOREIGN KEY *\( *(.+?) *\) +'
                'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\)'
            )

            for match in re.finditer(FK_PATTERN, table_data, re.I):
                (
                    constraint_name, constrained_columns,
                    referred_quoted_name, referred_name,
                    referred_columns) = match.group(1, 2, 3, 4, 5)
                constrained_columns = list(
                    self._find_cols_in_sig(constrained_columns))
                if not referred_columns:
                    referred_columns = constrained_columns
                else:
                    referred_columns = list(
                        self._find_cols_in_sig(referred_columns))
                referred_name = referred_quoted_name or referred_name
                yield (
                    constraint_name, constrained_columns,
                    referred_name, referred_columns)
        fkeys = []

        for (
            constraint_name, constrained_columns,
                referred_name, referred_columns) in parse_fks():
            sig = fk_sig(
                constrained_columns, referred_name, referred_columns)
            if sig not in keys_by_signature:
                util.warn(
                    "WARNING: SQL-parsed foreign key constraint "
                    "'%s' could not be located in PRAGMA "
                    "foreign_keys for table %s" % (
                        sig,
                        table_name
                    ))
                continue
            key = keys_by_signature.pop(sig)
            key['name'] = constraint_name
            fkeys.append(key)
        # assume the remainders are the unnamed, inline constraints, just
        # use them as is as it's extremely difficult to parse inline
        # constraints
        fkeys.extend(keys_by_signature.values())
        return fkeys

    def _find_cols_in_sig(self, sig):
        """Yield column names from a comma-separated, possibly quoted list."""
        for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
            yield match.group(1) or match.group(2)

    @reflection.cache
    def get_unique_constraints(self, connection, table_name,
                               schema=None, **kw):
        """Return UNIQUE constraints, distinguished from unique indexes by
        parsing the table's CREATE statement.
        """
        auto_index_by_sig = {}
        for idx in self.get_indexes(
                connection, table_name, schema=schema,
                include_auto_indexes=True, **kw):
            if not idx['name'].startswith("sqlite_autoindex"):
                continue
            sig = tuple(idx['column_names'])
            auto_index_by_sig[sig] = idx

        table_data = self._get_table_sql(
            connection, table_name, schema=schema, **kw)

        if not table_data:
            return []

        unique_constraints = []

        def parse_uqs():
            UNIQUE_PATTERN = '(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
            INLINE_UNIQUE_PATTERN = (
                '(?:(".+?")|([a-z0-9]+)) '
                '+[a-z0-9_ ]+? +UNIQUE')

            for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
                name, cols = match.group(1, 2)
                yield name, list(self._find_cols_in_sig(cols))

            # we need to match inlines as well, as we seek to differentiate
            # a UNIQUE constraint from a UNIQUE INDEX, even though these
            # are kind of the same thing :)
            for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
                cols = list(
                    self._find_cols_in_sig(match.group(1) or match.group(2)))
                yield None, cols

        for name, cols in parse_uqs():
            sig = tuple(cols)
            if sig in auto_index_by_sig:
                auto_index_by_sig.pop(sig)
                parsed_constraint = {
                    'name': name,
                    'column_names': cols
                }
                unique_constraints.append(parsed_constraint)
        # NOTE: auto_index_by_sig might not be empty here,
        # the PRIMARY KEY may have an entry.
        return unique_constraints

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Return index dicts via PRAGMA index_list / index_info."""
        pragma_indexes = self._get_table_pragma(
            connection, "index_list", table_name, schema=schema)
        indexes = []

        include_auto_indexes = kw.pop('include_auto_indexes', False)
        for row in pragma_indexes:
            # ignore implicit primary key index.
            # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
            if (not include_auto_indexes and
                    row[1].startswith('sqlite_autoindex')):
                continue

            indexes.append(dict(name=row[1], column_names=[], unique=row[2]))

        # loop thru unique indexes to get the column names.
        for idx in indexes:
            # pass schema= so indexes on tables in attached databases are
            # resolved against the correct schema
            pragma_index = self._get_table_pragma(
                connection, "index_info", idx['name'], schema=schema)

            for row in pragma_index:
                idx['column_names'].append(row[2])
        return indexes

    @reflection.cache
    def _get_table_sql(self, connection, table_name, schema=None, **kw):
        """Return the original CREATE TABLE statement for ``table_name``."""
        try:
            s = ("SELECT sql FROM "
                 " (SELECT * FROM sqlite_master UNION ALL "
                 "  SELECT * FROM sqlite_temp_master) "
                 "WHERE name = '%s' "
                 "AND type = 'table'") % table_name
            rs = connection.execute(s)
        except exc.DBAPIError:
            s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
                 "AND type = 'table'") % table_name
            rs = connection.execute(s)
        return rs.scalar()

    def _get_table_pragma(self, connection, pragma, table_name, schema=None):
        """Run ``PRAGMA [schema.]pragma(table_name)`` and return all rows."""
        quote = self.identifier_preparer.quote_identifier
        if schema is not None:
            statement = "PRAGMA %s." % quote(schema)
        else:
            statement = "PRAGMA "
        qtable = quote(table_name)
        statement = "%s%s(%s)" % (statement, pragma, qtable)
        cursor = connection.execute(statement)
        if not cursor._soft_closed:
            # work around SQLite issue whereby cursor.description
            # is blank when PRAGMA returns no rows:
            # http://www.sqlite.org/cvstrac/tktview?tn=1884
            result = cursor.fetchall()
        else:
            result = []
        return result
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Unit tests for graph building"""
# Copyright (C) 2013 Garth N. Wells
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2013-08-10
# Last changed:
import unittest
from dolfin import *
class GraphBuilding(unittest.TestCase):
    """Unit tests for local mesh graph construction."""

    def test_build_from_mesh_simple(self):
        """Build local graphs between mesh entities of several dimensions."""
        mesh = UnitCubeMesh(16, 16, 16)
        tdim = mesh.topology().dim()
        # Top-dimensional entities against vertices/edges ...
        for d in (0, 1):
            GraphBuilder.local_graph(mesh, tdim, d)
        # ... and lower-dimensional entities against cells.
        for d in (2, 1, 0):
            GraphBuilder.local_graph(mesh, d, tdim)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package local
import (
"path/filepath"
"testing"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/hashicorp/terraform/internal/providers"
testing_provider "github.com/hashicorp/terraform/internal/providers/testing"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/states/statemgr"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// TestLocal returns a configured Local struct with temporary paths and
// in-memory ContextOpts.
//
// No operations will be called on the returned value, so you can still set
// public fields without any locks.
func TestLocal(t *testing.T) *Local {
	t.Helper()

	// Resolve symlinks so path comparisons in tests are stable (e.g. on
	// macOS where TMPDIR is a symlink).
	dir, err := filepath.EvalSymlinks(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}

	b := New()
	b.StatePath = filepath.Join(dir, "state.tfstate")
	b.StateOutPath = filepath.Join(dir, "state.tfstate")
	b.StateBackupPath = filepath.Join(dir, "state.tfstate.bak")
	b.StateWorkspaceDir = filepath.Join(dir, "state.tfstate.d")
	b.ContextOpts = &terraform.ContextOpts{}

	return b
}
// TestLocalProvider modifies the ContextOpts of the *Local parameter to
// have a provider with the given name.
func TestLocalProvider(t *testing.T, b *Local, name string, schema providers.ProviderSchema) *testing_provider.MockProvider {
	// Build a mock resource provider for in-memory operations
	p := new(testing_provider.MockProvider)
	p.GetProviderSchemaResponse = &schema
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		// this is a destroy plan,
		if req.ProposedNewState.IsNull() {
			resp.PlannedState = req.ProposedNewState
			resp.PlannedPrivate = req.PriorPrivate
			return resp
		}

		rSchema := schema.SchemaForResourceType(addrs.ManagedResourceMode, req.TypeName)
		if rSchema.Body == nil {
			rSchema.Body = &configschema.Block{} // default schema is empty
		}

		// Mimic a real provider: null computed attributes become
		// unknown values in the planned state.
		plannedVals := map[string]cty.Value{}
		for name, attrS := range rSchema.Body.Attributes {
			val := req.ProposedNewState.GetAttr(name)
			if attrS.Computed && val.IsNull() {
				val = cty.UnknownVal(attrS.Type)
			}
			plannedVals[name] = val
		}
		for name := range rSchema.Body.BlockTypes {
			// For simplicity's sake we just copy the block attributes over
			// verbatim, since this package's mock providers are all relatively
			// simple -- we're testing the backend, not esoteric provider features.
			plannedVals[name] = req.ProposedNewState.GetAttr(name)
		}

		return providers.PlanResourceChangeResponse{
			PlannedState:   cty.ObjectVal(plannedVals),
			PlannedPrivate: req.PriorPrivate,
		}
	}
	// Reads simply echo back the prior state / configuration.
	p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
		return providers.ReadResourceResponse{NewState: req.PriorState}
	}
	p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
		return providers.ReadDataSourceResponse{State: req.Config}
	}

	// Initialize the opts
	if b.ContextOpts == nil {
		b.ContextOpts = &terraform.ContextOpts{}
	}

	// Set up our provider
	b.ContextOpts.Providers = map[addrs.Provider]providers.Factory{
		addrs.NewDefaultProvider(name): providers.FactoryFixed(p),
	}

	return p
}
// TestLocalSingleState is a backend implementation that wraps Local
// and modifies it to only support single states (returns
// ErrWorkspacesNotSupported for multi-state operations).
//
// This isn't an actual use case, this is exported just to provide a
// easy way to test that behavior.
type TestLocalSingleState struct {
	// Embeds Local so all backend.Backend methods are inherited; the
	// workspace-related methods are shadowed below.
	*Local
}
// TestNewLocalSingle is a factory for creating a TestLocalSingleState.
// This function matches the signature required for backend/init.
func TestNewLocalSingle() backend.Backend {
	b := &TestLocalSingleState{Local: New()}
	return b
}
// Workspaces always fails: named workspaces are not supported by this
// backend variant.
func (b *TestLocalSingleState) Workspaces() ([]string, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	diags = diags.Append(backend.ErrWorkspacesNotSupported)
	return nil, diags
}
// DeleteWorkspace always fails: named workspaces are not supported by
// this backend variant.
func (b *TestLocalSingleState) DeleteWorkspace(string, bool) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics
	return diags.Append(backend.ErrWorkspacesNotSupported)
}
// StateMgr delegates to the wrapped Local for the default workspace only;
// any other name is rejected with ErrWorkspacesNotSupported.
func (b *TestLocalSingleState) StateMgr(name string) (statemgr.Full, tfdiags.Diagnostics) {
	if name != backend.DefaultStateName {
		var diags tfdiags.Diagnostics
		diags = diags.Append(backend.ErrWorkspacesNotSupported)
		return nil, diags
	}
	return b.Local.StateMgr(name)
}
// TestLocalNoDefaultState is a backend implementation that wraps
// Local and modifies it to support named states, but not the
// default state. It returns ErrDefaultWorkspaceNotSupported when
// the DefaultStateName is used.
type TestLocalNoDefaultState struct {
	// Embeds Local so all backend.Backend methods are inherited; the
	// workspace-related methods are shadowed below.
	*Local
}
// TestNewLocalNoDefault is a factory for creating a TestLocalNoDefaultState.
// This function matches the signature required for backend/init.
func TestNewLocalNoDefault() backend.Backend {
	b := &TestLocalNoDefaultState{Local: New()}
	return b
}
// Workspaces lists the wrapped backend's workspaces with the default
// workspace filtered out.
func (b *TestLocalNoDefaultState) Workspaces() ([]string, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	all, wDiags := b.Local.Workspaces()
	diags = diags.Append(wDiags)
	if wDiags.HasErrors() {
		return nil, diags
	}

	named := make([]string, 0, len(all))
	for _, ws := range all {
		if ws != backend.DefaultStateName {
			named = append(named, ws)
		}
	}
	return named, diags
}
// DeleteWorkspace rejects the default workspace and otherwise delegates
// to the wrapped Local backend.
func (b *TestLocalNoDefaultState) DeleteWorkspace(name string, force bool) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics
	if name == backend.DefaultStateName {
		return diags.Append(backend.ErrDefaultWorkspaceNotSupported)
	}
	return b.Local.DeleteWorkspace(name, force)
}
// StateMgr rejects the default workspace and otherwise delegates to the
// wrapped Local backend.
func (b *TestLocalNoDefaultState) StateMgr(name string) (statemgr.Full, tfdiags.Diagnostics) {
	if name == backend.DefaultStateName {
		var diags tfdiags.Diagnostics
		diags = diags.Append(backend.ErrDefaultWorkspaceNotSupported)
		return nil, diags
	}
	return b.Local.StateMgr(name)
}
// testStateFile writes s to a filesystem state manager at path, failing
// the test immediately on error. (The previous version silently ignored
// WriteState's error, letting later assertions fail confusingly.)
func testStateFile(t *testing.T, path string, s *states.State) {
	t.Helper()
	stateFile := statemgr.NewFilesystem(path)
	if err := stateFile.WriteState(s); err != nil {
		t.Fatalf("writing state to %s: %s", path, err)
	}
}
// mustProviderConfig parses s as an absolute provider config address,
// panicking on failure. Intended for fixed test fixtures only.
func mustProviderConfig(s string) addrs.AbsProviderConfig {
	cfg, diags := addrs.ParseAbsProviderConfigStr(s)
	if diags.HasErrors() {
		panic(diags.Err())
	}
	return cfg
}
// mustResourceInstanceAddr parses s as an absolute resource instance
// address, panicking on failure. Intended for fixed test fixtures only.
func mustResourceInstanceAddr(s string) addrs.AbsResourceInstance {
	instAddr, diags := addrs.ParseAbsResourceInstanceStr(s)
	if diags.HasErrors() {
		panic(diags.Err())
	}
	return instAddr
}
// assertBackendStateUnlocked attempts to take the backend state lock. If
// that fails the state was already locked: an error is reported on t and
// false is returned. It returns true when the lock could be acquired,
// i.e. the state was unlocked. Note the acquired lock is not released.
func assertBackendStateUnlocked(t *testing.T, b *Local) bool {
	t.Helper()
	stateMgr, _ := b.StateMgr(backend.DefaultStateName)
	if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
		t.Errorf("state is already locked: %s", err.Error())
		return false
	}
	return true
}
// assertBackendStateLocked attempts to take the backend state lock. If the
// attempt fails the state was indeed locked and true is returned. If the
// lock is unexpectedly acquired, an error is reported on t and false is
// returned.
func assertBackendStateLocked(t *testing.T, b *Local) bool {
	t.Helper()
	stateMgr, _ := b.StateMgr(backend.DefaultStateName)
	if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
		// Locking failed, so the state was held by someone else, as
		// the assertion expects.
		return true
	}
	t.Error("unexpected success locking state")
	// Fix: previously returned true here as well, so callers checking the
	// return value could not detect the failed assertion.
	return false
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/backend/local/testing.go
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier, David Beal
# Copyright 2013 Camptocamp SA
# Copyright 2013 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import urllib2
import base64
import xmlrpclib
import sys
from collections import defaultdict
from openerp.osv import orm, fields
from openerp.tools.translate import _
from openerp.addons.connector.queue.job import job, related_action
from openerp.addons.connector.event import on_record_write
from openerp.addons.connector.unit.synchronizer import (ImportSynchronizer,
ExportSynchronizer
)
from openerp.addons.connector.exception import (MappingError,
InvalidDataError,
IDMissingInBackend
)
from openerp.addons.connector.session import ConnectorSession
from openerp.addons.connector.unit.mapper import (mapping,
ImportMapper,
)
from .unit.backend_adapter import (GenericAdapter,
MAGENTO_DATETIME_FORMAT,
)
from .unit.mapper import normalize_datetime
from .unit.import_synchronizer import (DelayedBatchImport,
MagentoImportSynchronizer,
TranslationImporter,
AddCheckpoint,
)
from .connector import get_environment
from .backend import magento
from .related_action import unwrap_binding
_logger = logging.getLogger(__name__)
def chunks(items, length):
    """Yield successive slices of *items*, each at most *length* long."""
    for start in xrange(0, len(items), length):
        yield items[start:start + length]
class magento_product_product(orm.Model):
    """Magento binding for ``product.product``.

    One record per (backend, product) pair; ``_inherits`` delegates the
    regular product fields to the linked ``product.product`` record.
    """
    _name = 'magento.product.product'
    _inherit = 'magento.binding'
    _inherits = {'product.product': 'openerp_id'}
    _description = 'Magento Product'

    def product_type_get(self, cr, uid, context=None):
        """Return the Magento product types supported by the connector.

        Extension modules may override/extend this selection (see the
        BundleImporter docstring for an example adding 'bundle').
        """
        return [
            ('simple', 'Simple Product'),
            ('configurable', 'Configurable Product'),
            # XXX activate when supported
            # ('grouped', 'Grouped Product'),
            # ('virtual', 'Virtual Product'),
            # ('bundle', 'Bundle Product'),
            # ('downloadable', 'Downloadable Product'),
        ]

    def _product_type_get(self, cr, uid, context=None):
        # Indirection so the selection stays overridable by inheritance.
        return self.product_type_get(cr, uid, context=context)

    _columns = {
        'openerp_id': fields.many2one('product.product',
                                      string='Product',
                                      required=True,
                                      ondelete='restrict'),
        # XXX website_ids can be computed from categories
        'website_ids': fields.many2many('magento.website',
                                        string='Websites',
                                        readonly=True),
        'created_at': fields.date('Created At (on Magento)'),
        'updated_at': fields.date('Updated At (on Magento)'),
        'product_type': fields.selection(_product_type_get,
                                         'Magento Product Type',
                                         required=True),
        'manage_stock': fields.selection(
            [('use_default', 'Use Default Config'),
             ('no', 'Do Not Manage Stock'),
             ('yes', 'Manage Stock')],
            string='Manage Stock Level',
            required=True),
        'backorders': fields.selection(
            [('use_default', 'Use Default Config'),
             ('no', 'No Sell'),
             ('yes', 'Sell Quantity < 0'),
             ('yes-and-notification', 'Sell Quantity < 0 and '
                                      'Use Customer Notification')],
            string='Manage Inventory Backorders',
            required=True),
        'magento_qty': fields.float('Computed Quantity',
                                    help="Last computed quantity to send "
                                         "on Magento."),
        'no_stock_sync': fields.boolean(
            'No Stock Synchronization',
            required=False,
            help="Check this to exclude the product "
                 "from stock synchronizations."),
    }

    _defaults = {
        'product_type': 'simple',
        'manage_stock': 'use_default',
        'backorders': 'use_default',
        'no_stock_sync': False,
    }

    _sql_constraints = [
        ('magento_uniq', 'unique(backend_id, magento_id)',
         "A product with the same ID on Magento already exists")
    ]

    # Number of products read per batch in recompute_magento_qty().
    RECOMPUTE_QTY_STEP = 1000  # products at a time

    def write(self, cr, uid, ids, vals, context=None):
        """Override write to enqueue an inventory export job when
        inventory-related values change (see INVENTORY_FIELDS below)."""
        if not hasattr(ids, '__iter__'):
            ids = [ids]
        # magento_qty maybe 0, also need to be updated
        # NOTE(review): only writes touching 'magento_qty' trigger the
        # export, even though inventory_fields may also contain the
        # other INVENTORY_FIELDS -- confirm this guard is intended.
        if "magento_qty" in vals:
            for record_id in ids:
                session = ConnectorSession(cr, uid, context=context)
                # Skip when the write comes from an import (avoid loops).
                if session.context.get('connector_no_export'):
                    continue
                if session.browse('magento.product.product', record_id).no_stock_sync:
                    continue
                inventory_fields = list(set(vals).intersection(INVENTORY_FIELDS))
                if inventory_fields:
                    export_product_inventory.delay(session, 'magento.product.product',
                                                   record_id, fields=inventory_fields,
                                                   priority=20)
        return super(magento_product_product, self).write(cr, uid, ids, vals,
                                                          context=context)

    def recompute_magento_qty(self, cr, uid, ids, context=None):
        """ Check if the quantity in the stock location configured
        on the backend has changed since the last export.

        If it has changed, write the updated quantity on `magento_qty`.
        The write on `magento_qty` will trigger an `on_record_write`
        event that will create an export job.

        It groups the products by backend to avoid to read the backend
        informations for each product.
        """
        if not hasattr(ids, '__iter__'):
            ids = [ids]

        # group products by backend
        backends = defaultdict(list)
        for product in self.read(cr, uid, ids, ['backend_id', 'magento_qty'],
                                 context=context):
            backends[product['backend_id'][0]].append(product)

        for backend_id, products in backends.iteritems():
            backend_obj = self.pool['magento.backend']
            backend = backend_obj.browse(cr, uid, backend_id, context=context)
            self._recompute_magento_qty_backend(cr, uid, backend, products,
                                                context=context)
        return True

    def _recompute_magento_qty_backend(self, cr, uid, backend, products,
                                       read_fields=None, context=None):
        """ Recompute the products quantity for one backend.

        If field names are passed in ``read_fields`` (as a list), they
        will be read in the product that is used in
        :meth:`~._magento_qty`.
        """
        if context is None:
            context = {}
        # Use the stock field configured on the backend, defaulting to
        # the forecasted quantity.
        if backend.product_stock_field_id:
            stock_field = backend.product_stock_field_id.name
        else:
            stock_field = 'virtual_available'

        # Quantities are evaluated in the context of the backend's stock
        # location.
        location = backend.warehouse_id.lot_stock_id
        location_ctx = context.copy()
        location_ctx['location'] = location.id

        product_fields = ['magento_qty', stock_field]
        if read_fields:
            product_fields += read_fields

        product_ids = [product['id'] for product in products]
        # Read in chunks to keep memory usage bounded.
        for chunk_ids in chunks(product_ids, self.RECOMPUTE_QTY_STEP):
            for product in self.read(cr, uid, chunk_ids, product_fields,
                                     context=location_ctx):
                new_qty = self._magento_qty(cr, uid, product,
                                            backend,
                                            location,
                                            stock_field,
                                            context=location_ctx)
                # Only write on change: the write itself triggers an
                # export job (see write() above).
                if new_qty != product['magento_qty']:
                    self.write(cr, uid, product['id'],
                               {'magento_qty': new_qty},
                               context=context)

    def _magento_qty(self, cr, uid, product, backend, location,
                     stock_field, context=None):
        """ Return the current quantity for one product.

        Can be inherited to change the way the quantity is computed,
        according to a backend / location.

        If you need to read additional fields on the product, see the
        ``read_fields`` argument of :meth:`~._recompute_magento_qty_backend`
        """
        return product[stock_field]
class product_product(orm.Model):
    """Add the Magento bindings to the regular product model."""
    _inherit = 'product.product'

    _columns = {
        'magento_bind_ids': fields.one2many(
            'magento.product.product',
            'openerp_id',
            string='Magento Bindings',),
    }

    def copy_data(self, cr, uid, id, default=None, context=None):
        """Duplicate a product without carrying over its Magento bindings."""
        if default is None:
            default = {}
        # A copy must never share the original's binding records.
        default['magento_bind_ids'] = False
        return super(product_product, self).copy_data(
            cr, uid, id, default=default, context=context)
@magento
class ProductProductAdapter(GenericAdapter):
    """XML-RPC backend adapter for Magento products."""
    _model_name = 'magento.product.product'
    _magento_model = 'catalog_product'
    _admin_path = '/{model}/edit/id/{id}'

    def _call(self, method, arguments):
        """Call the Magento API, translating its "product does not exist"
        fault into ``IDMissingInBackend``."""
        try:
            return super(ProductProductAdapter, self)._call(method, arguments)
        except xmlrpclib.Fault as err:
            # this is the error in the Magento API
            # when the product does not exist
            if err.faultCode == 101:
                raise IDMissingInBackend
            else:
                raise

    def search(self, filters=None, from_date=None, to_date=None):
        """ Search records according to some criteria
        and returns a list of ids

        :param from_date: datetime lower bound on ``updated_at``
        :param to_date: datetime upper bound on ``updated_at``
        :rtype: list
        """
        if filters is None:
            filters = {}
        dt_fmt = MAGENTO_DATETIME_FORMAT
        if from_date is not None:
            filters.setdefault('updated_at', {})
            filters['updated_at']['from'] = from_date.strftime(dt_fmt)
        if to_date is not None:
            filters.setdefault('updated_at', {})
            filters['updated_at']['to'] = to_date.strftime(dt_fmt)
        # TODO add a search entry point on the Magento API
        return [int(row['product_id']) for row
                in self._call('%s.list' % self._magento_model,
                              [filters] if filters else [{}])]

    def read(self, id, storeview_id=None, attributes=None):
        """ Returns the information of a record

        :rtype: dict
        """
        # 'ol_catalog_product.*' are custom API entry points; the
        # trailing 'id' argument tells Magento which identifier type is
        # used -- presumably provided by a server-side extension (see
        # the XXX note on write()).
        return self._call('ol_catalog_product.info',
                          [int(id), storeview_id, attributes, 'id'])

    def write(self, id, data, storeview_id=None):
        """ Update records on the external system """
        # XXX actually only ol_catalog_product.update works
        # the PHP connector maybe breaks the catalog_product.update
        return self._call('ol_catalog_product.update',
                          [int(id), data, storeview_id, 'id'])

    def get_images(self, id, storeview_id=None):
        # List the media (image) entries attached to a product.
        return self._call('product_media.list', [int(id), storeview_id, 'id'])

    def read_image(self, id, image_name, storeview_id=None):
        # Detailed info (including the URL) for a single product image.
        return self._call('product_media.info',
                          [int(id), image_name, storeview_id, 'id'])

    def update_inventory(self, id, data):
        # product_stock.update is too slow
        return self._call('oerp_cataloginventory_stock_item.update',
                          [int(id), data])
@magento
class ProductBatchImport(DelayedBatchImport):
    """ Import the Magento Products.

    For every product returned by the search, a delayed job is created.
    Import from a date
    """
    _model_name = ['magento.product.product']

    def run(self, filters=None):
        """ Run the synchronization

        :param filters: optional Magento search filters; the
                        ``from_date`` / ``to_date`` bounds are popped
                        from it before the search.
        """
        # Guard against being called without filters: the original
        # crashed with AttributeError on filters.pop() when None.
        if filters is None:
            filters = {}
        from_date = filters.pop('from_date', None)
        to_date = filters.pop('to_date', None)
        record_ids = self.backend_adapter.search(filters,
                                                 from_date=from_date,
                                                 to_date=to_date)
        _logger.info('search for magento products %s returned %s',
                     filters, record_ids)
        for record_id in record_ids:
            self._import_record(record_id)
@magento
class CatalogImageImporter(ImportSynchronizer):
    """ Import images for a record.

    Usually called from importers, in ``_after_import``.
    For instance from the products importer.
    """
    _model_name = ['magento.product.product',
                   ]

    def _get_images(self, storeview_id=None):
        """Fetch the image metadata list from the Magento API."""
        return self.backend_adapter.get_images(self.magento_id, storeview_id)

    def _sort_images(self, images):
        """ Returns a list of images sorted by their priority.
        An image with the 'image' type is the the primary one.
        The other images are sorted by their position.

        The returned list is reversed, the items at the end
        of the list have the higher priority.
        """
        if not images:
            # Fix: return a list, as documented (was an empty dict).
            return []
        # place the images where the type is 'image' first then
        # sort them by the reverse priority (last item of the list has
        # the the higher priority)

        def priority(image):
            primary = 'image' in image['types']
            try:
                position = int(image['position'])
            except ValueError:
                position = sys.maxint
            return (primary, -position)
        return sorted(images, key=priority)

    def _get_binary_image(self, image_data):
        """Download one image and return its binary content.

        Returns None when the image is missing on the server (HTTP 404);
        any other HTTP error is propagated so the import job fails.
        """
        url = image_data['url'].encode('utf8')
        try:
            request = urllib2.Request(url)
            if self.backend_record.auth_basic_username \
                    and self.backend_record.auth_basic_password:
                # Fix: base64.encodestring() appends newline(s), which
                # would produce an invalid Authorization header.
                base64string = base64.encodestring(
                    '%s:%s' % (self.backend_record.auth_basic_username,
                               self.backend_record.auth_basic_password))
                base64string = base64string.replace('\n', '')
                request.add_header("Authorization", "Basic %s" % base64string)
            binary = urllib2.urlopen(request)
        except urllib2.HTTPError as err:
            if err.code == 404:
                # the image is just missing, we skip it
                return
            else:
                # we don't know why we couldn't download the image
                # so we propagate the error, the import will fail
                # and we have to check why it couldn't be accessed
                raise
        else:
            return binary.read()

    def run(self, magento_id, binding_id):
        """Import and store the highest-priority downloadable image."""
        self.magento_id = magento_id
        images = self._get_images()
        images = self._sort_images(images)
        binary = None
        # Try images from highest to lowest priority until one downloads.
        while not binary and images:
            binary = self._get_binary_image(images.pop())
        if not binary:
            return
        # Flag the write so it does not trigger an export back to Magento.
        with self.session.change_context({'connector_no_export': True}):
            self.session.write(self.model._name,
                               binding_id,
                               {'image': base64.b64encode(binary)})
@magento
class BundleImporter(ImportSynchronizer):
    """ Can be inherited to change the way the bundle products are
    imported.

    Called at the end of the import of a product.

    Example of action when importing a bundle product:
        - Create a bill of material
        - Import the structure of the bundle in new objects

    By default, the bundle products are not imported: the jobs
    are set as failed, because there is no known way to import them.
    An additional module that implements the import should be installed.

    If you want to create a custom importer for the bundles, you have to
    declare the ConnectorUnit on your backend::

        @magento_custom
        class XBundleImporter(BundleImporter):
            _model_name = 'magento.product.product'

            # implement import_bundle

    If you want to create a generic module that import bundles, you have
    to replace the current ConnectorUnit::

        @magento(replacing=BundleImporter)
        class XBundleImporter(BundleImporter):
            _model_name = 'magento.product.product'

            # implement import_bundle

    And to add the bundle type in the supported product types::

        class magento_product_product(orm.Model):
            _inherit = 'magento.product.product'

            def product_type_get(self, cr, uid, context=None):
                types = super(magento_product_product, self).product_type_get(
                    cr, uid, context=context)
                if 'bundle' not in [item[0] for item in types]:
                    types.append(('bundle', 'Bundle'))
                return types
    """
    _model_name = 'magento.product.product'

    def run(self, binding_id, magento_record):
        """ Import the bundle information about a product.

        :param magento_record: product information from Magento
        """
        # Intentionally a no-op: bundle import is not supported by
        # default (see class docstring); extension modules override this.
@magento
class ProductImport(MagentoImportSynchronizer):
    """Import one Magento product with its dependencies, translations
    and images."""
    _model_name = ['magento.product.product']

    @property
    def mapper(self):
        # Lazily resolve the specialized product mapper instead of the
        # default one configured on the synchronizer.
        if self._mapper is None:
            self._mapper = self.get_connector_unit_for_model(
                ProductImportMapper)
        return self._mapper

    def _import_bundle_dependencies(self):
        """ Import the dependencies for a Bundle """
        bundle = self.magento_record['_bundle_data']
        for option in bundle['options']:
            for selection in option['selections']:
                self._import_dependency(selection['product_id'],
                                        'magento.product.product')

    def _import_dependencies(self):
        """ Import the dependencies for the record"""
        record = self.magento_record
        # import related categories
        for mag_category_id in record['categories']:
            self._import_dependency(mag_category_id,
                                    'magento.product.category')
        if record['type_id'] == 'bundle':
            self._import_bundle_dependencies()

    def _validate_product_type(self, data):
        """ Check if the product type is in the selection (so we can
        prevent the `except_orm` and display a better error message).
        """
        sess = self.session
        product_type = data['product_type']
        cr, uid, context = sess.cr, sess.uid, sess.context
        product_obj = sess.pool['magento.product.product']
        types = product_obj.product_type_get(cr, uid, context=context)
        available_types = [typ[0] for typ in types]
        if product_type not in available_types:
            raise InvalidDataError("The product type '%s' is not "
                                   "yet supported in the connector." %
                                   product_type)

    def _must_skip(self):
        """ Hook called right after we read the data from the backend.

        If the method returns a message giving a reason for the
        skipping, the import will be interrupted and the message
        recorded in the job (if the import is called directly by the
        job, not by dependencies).

        If it returns None, the import will continue normally.

        :returns: None | str | unicode
        """
        if self.magento_record['type_id'] == 'configurable':
            return _('The configurable product is not imported in OpenERP, '
                     'because only the simple products are used in the sales '
                     'orders.')

    def _validate_data(self, data):
        """ Check if the values to import are correct

        Pro-actively check before the ``_create`` or
        ``_update`` if some fields are missing or invalid

        Raise `InvalidDataError`
        """
        self._validate_product_type(data)

    def _create(self, data):
        # Create the binding, then flag it with a checkpoint so a user
        # reviews the newly imported product.
        openerp_binding_id = super(ProductImport, self)._create(data)
        checkpoint = self.get_connector_unit_for_model(AddCheckpoint)
        checkpoint.run(openerp_binding_id)
        return openerp_binding_id

    def _after_import(self, binding_id):
        """ Hook called at the end of the import """
        # Import the translated values of the translatable fields.
        translation_importer = self.get_connector_unit_for_model(
            TranslationImporter, self.model._name)
        translation_importer.run(self.magento_id, binding_id,
                                 mapper_class=ProductImportMapper)
        # Import the product images.
        image_importer = self.get_connector_unit_for_model(
            CatalogImageImporter, self.model._name)
        image_importer.run(self.magento_id, binding_id)
        # Delegate bundle handling to the (extensible) bundle importer.
        if self.magento_record['type_id'] == 'bundle':
            bundle_importer = self.get_connector_unit_for_model(
                BundleImporter, self.model._name)
            bundle_importer.run(binding_id, self.magento_record)
@magento
class IsActiveProductImportMapper(ImportMapper):
    _model_name = 'magento.product.product'

    @mapping
    def is_active(self, record):
        """Map Magento's ``status`` to the OpenERP ``active`` flag.

        A status of ``'1'`` on Magento means the product is enabled.
        """
        enabled = record.get('status') == '1'
        return {'active': enabled}
@magento
class BundleProductImportMapper(ImportMapper):
    # Intentionally empty: extension point for modules implementing
    # bundle product import (see the BundleImporter docstring).
    _model_name = 'magento.product.product'
@magento
class ProductImportMapper(ImportMapper):
    """Map a Magento product record to ``magento.product.product`` values."""
    _model_name = 'magento.product.product'

    # TODO :     categ, special_price => minimal_price
    direct = [('name', 'name'),
              ('description', 'description'),
              ('weight', 'weight'),
              ('cost', 'standard_price'),
              ('short_description', 'description_sale'),
              ('sku', 'default_code'),
              ('type_id', 'product_type'),
              (normalize_datetime('created_at'), 'created_at'),
              (normalize_datetime('updated_at'), 'updated_at'),
              ]

    @mapping
    def is_active(self, record):
        # Delegate to the dedicated mapper so the rule can be replaced
        # independently by extension modules.
        mapper = self.get_connector_unit_for_model(IsActiveProductImportMapper)
        return mapper.map_record(record).values()

    @mapping
    def price(self, record):
        """ The price is imported at the creation of
        the product, then it is only modified and exported
        from OpenERP """
        return {'list_price': record.get('price', 0.0)}

    @mapping
    def type(self, record):
        # Simple products become stockable products; other types keep
        # the model's default type.
        if record['type_id'] == 'simple':
            return {'type': 'product'}
        return

    @mapping
    def website_ids(self, record):
        """Link the product to the already-imported Magento websites."""
        website_ids = []
        binder = self.get_binder_for_model('magento.website')
        for mag_website_id in record['websites']:
            website_id = binder.to_openerp(mag_website_id)
            website_ids.append((4, website_id))  # (4, id): add link to m2m
        return {'website_ids': website_ids}

    @mapping
    def categories(self, record):
        """Map the Magento categories; the first one becomes the main
        category, falling back to the backend's default category."""
        mag_categories = record['categories']
        binder = self.get_binder_for_model('magento.product.category')

        category_ids = []
        main_categ_id = None

        for mag_category_id in mag_categories:
            cat_id = binder.to_openerp(mag_category_id, unwrap=True)
            if cat_id is None:
                raise MappingError("The product category with "
                                   "magento id %s is not imported." %
                                   mag_category_id)
            category_ids.append(cat_id)

        if category_ids:
            main_categ_id = category_ids.pop(0)

        if main_categ_id is None:
            default_categ = self.backend_record.default_category_id
            if default_categ:
                main_categ_id = default_categ.id

        result = {'categ_ids': [(6, 0, category_ids)]}  # (6,0,ids): replace m2m
        if main_categ_id:  # OpenERP assign 'All Products' if not specified
            result['categ_id'] = main_categ_id
        return result

    @mapping
    def magento_id(self, record):
        return {'magento_id': record['product_id']}

    @mapping
    def backend_id(self, record):
        return {'backend_id': self.backend_record.id}

    @mapping
    def bundle_mapping(self, record):
        # Extra values for bundle products come from the (extensible)
        # bundle mapper; other types contribute nothing.
        if record['type_id'] == 'bundle':
            bundle_mapper = self.get_connector_unit_for_model(
                BundleProductImportMapper)
            return bundle_mapper.map_record(record).values()
@magento
class ProductInventoryExport(ExportSynchronizer):
    """Export the stock configuration and quantity of a product to
    Magento."""
    _model_name = ['magento.product.product']

    # Map the OpenERP 'backorders' selection to Magento's numeric codes.
    _map_backorders = {'use_default': 0,
                       'no': 0,
                       'yes': 1,
                       'yes-and-notification': 2,
                       }

    def _get_data(self, product, fields):
        """Build the Magento inventory payload for the requested fields."""
        result = {}
        if 'magento_qty' in fields:
            result.update({
                'qty': product.magento_qty,
                # put the stock availability to "out of stock"
                'is_in_stock': int(product.magento_qty > 0)
            })
        if 'manage_stock' in fields:
            manage = product.manage_stock
            result.update({
                'manage_stock': int(manage == 'yes'),
                'use_config_manage_stock': int(manage == 'use_default'),
            })
        if 'backorders' in fields:
            backorders = product.backorders
            result.update({
                'backorders': self._map_backorders[backorders],
                'use_config_backorders': int(backorders == 'use_default'),
            })
        return result

    def run(self, binding_id, fields):
        """ Export the product inventory to Magento """
        product = self.session.browse(self.model._name, binding_id)
        binder = self.get_binder_for_model()
        magento_id = binder.to_backend(product.id)
        data = self._get_data(product, fields)
        self.backend_adapter.update_inventory(magento_id, data)
# fields which should not trigger an export of the products
# but an export of their inventory
INVENTORY_FIELDS = ('manage_stock',
                    'backorders',
                    'magento_qty',
                    )

# using the override write method
# (kept for reference: this event-based handler was replaced by the
# write() override on magento.product.product above)
# @on_record_write(model_names='magento.product.product')
# def magento_product_modified(session, model_name, record_id, vals):
#     if session.context.get('connector_no_export'):
#         return
#     if session.browse(model_name, record_id).no_stock_sync:
#         return
#     inventory_fields = list(set(vals).intersection(INVENTORY_FIELDS))
#     if inventory_fields:
#         export_product_inventory.delay(session, model_name,
#                                        record_id, fields=inventory_fields,
#                                        priority=20)
@job
@related_action(action=unwrap_binding)
def export_product_inventory(session, model_name, record_id, fields=None):
    """ Export the inventory configuration and quantity of a product. """
    binding = session.browse(model_name, record_id)
    env = get_environment(session, model_name, binding.backend_id.id)
    exporter = env.get_connector_unit(ProductInventoryExport)
    return exporter.run(record_id, fields)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Config flow to configure the SimpliSafe component."""
from simplipy import API
from simplipy.errors import (
InvalidCredentialsError,
PendingAuthorizationError,
SimplipyError,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_CODE, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from . import async_get_client_id
from .const import DOMAIN, LOGGER # pylint: disable=unused-import
class SimpliSafeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a SimpliSafe config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    def __init__(self):
        """Initialize the config flow."""
        # Schema for the initial "user" step (full credentials).
        self.full_data_schema = vol.Schema(
            {
                vol.Required(CONF_USERNAME): str,
                vol.Required(CONF_PASSWORD): str,
                vol.Optional(CONF_CODE): str,
            }
        )
        # Schema for the re-auth step (password only).
        self.password_data_schema = vol.Schema({vol.Required(CONF_PASSWORD): str})

        # Credentials collected across the flow steps.
        self._code = None
        self._password = None
        self._username = None

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Define the config flow to handle options."""
        return SimpliSafeOptionsFlowHandler(config_entry)

    async def _async_get_simplisafe_api(self):
        """Get an authenticated SimpliSafe API client."""
        client_id = await async_get_client_id(self.hass)
        websession = aiohttp_client.async_get_clientsession(self.hass)

        return await API.login_via_credentials(
            self._username,
            self._password,
            client_id=client_id,
            session=websession,
        )

    async def _async_login_during_step(self, *, step_id, form_schema):
        """Attempt to log into the API from within a config flow step.

        On a pending MFA confirmation, branch to the "mfa" step; on
        credential or unknown errors, re-show the current step's form
        with an error; otherwise finish the entry setup.
        """
        errors = {}

        try:
            simplisafe = await self._async_get_simplisafe_api()
        except PendingAuthorizationError:
            LOGGER.info("Awaiting confirmation of MFA email click")
            return await self.async_step_mfa()
        except InvalidCredentialsError:
            errors = {"base": "invalid_auth"}
        except SimplipyError as err:
            LOGGER.error("Unknown error while logging into SimpliSafe: %s", err)
            errors = {"base": "unknown"}

        if errors:
            return self.async_show_form(
                step_id=step_id,
                data_schema=form_schema,
                errors=errors,
            )

        return await self.async_step_finish(
            {
                CONF_USERNAME: self._username,
                CONF_TOKEN: simplisafe.refresh_token,
                CONF_CODE: self._code,
            }
        )

    async def async_step_finish(self, user_input=None):
        """Handle finish config entry setup."""
        # If an entry already exists for this username, this is a
        # re-auth: update it in place instead of creating a duplicate.
        existing_entry = await self.async_set_unique_id(self._username)
        if existing_entry:
            self.hass.config_entries.async_update_entry(existing_entry, data=user_input)
            return self.async_abort(reason="reauth_successful")
        return self.async_create_entry(title=self._username, data=user_input)

    async def async_step_mfa(self, user_input=None):
        """Handle multi-factor auth confirmation."""
        if user_input is None:
            return self.async_show_form(step_id="mfa")

        try:
            simplisafe = await self._async_get_simplisafe_api()
        except PendingAuthorizationError:
            # The user has not clicked the confirmation link yet.
            LOGGER.error("Still awaiting confirmation of MFA email click")
            return self.async_show_form(
                step_id="mfa", errors={"base": "still_awaiting_mfa"}
            )

        return await self.async_step_finish(
            {
                CONF_USERNAME: self._username,
                CONF_TOKEN: simplisafe.refresh_token,
                CONF_CODE: self._code,
            }
        )

    async def async_step_reauth(self, config):
        """Handle configuration by re-auth."""
        self._code = config.get(CONF_CODE)
        self._username = config[CONF_USERNAME]
        return await self.async_step_reauth_confirm()

    async def async_step_reauth_confirm(self, user_input=None):
        """Handle re-auth completion."""
        if not user_input:
            return self.async_show_form(
                step_id="reauth_confirm", data_schema=self.password_data_schema
            )
        self._password = user_input[CONF_PASSWORD]
        return await self._async_login_during_step(
            step_id="reauth_confirm", form_schema=self.password_data_schema
        )

    async def async_step_user(self, user_input=None):
        """Handle the start of the config flow."""
        if not user_input:
            return self.async_show_form(
                step_id="user", data_schema=self.full_data_schema
            )

        # Abort early when this account is already configured.
        await self.async_set_unique_id(user_input[CONF_USERNAME])
        self._abort_if_unique_id_configured()

        self._code = user_input.get(CONF_CODE)
        self._password = user_input[CONF_PASSWORD]
        self._username = user_input[CONF_USERNAME]

        return await self._async_login_during_step(
            step_id="user", form_schema=self.full_data_schema
        )
class SimpliSafeOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a SimpliSafe options flow."""

    def __init__(self, config_entry):
        """Initialize."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is None:
            # First pass: show the form, pre-filled with the stored code.
            current_code = self.config_entry.options.get(CONF_CODE)
            options_schema = vol.Schema(
                {vol.Optional(CONF_CODE, default=current_code): str}
            )
            return self.async_show_form(step_id="init", data_schema=options_schema)
        return self.async_create_entry(title="", data=user_input)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.core.aop.ataspectj.aopataspectjexample;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.EnableAspectJAutoProxy;
// tag::snippet[]
@Configuration
@EnableAspectJAutoProxy
public class ApplicationConfiguration {

	/**
	 * Register the retry aspect as a bean so AspectJ auto-proxying picks it up.
	 * Retries up to three times; order 100 gives it higher precedence than
	 * lower-priority advice.
	 */
	@Bean
	public ConcurrentOperationExecutor concurrentOperationExecutor() {
		ConcurrentOperationExecutor retryExecutor = new ConcurrentOperationExecutor();
		retryExecutor.setMaxRetries(3);
		retryExecutor.setOrder(100);
		return retryExecutor;
	}

}
// end::snippet[]
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
framework-docs/src/main/java/org/springframework/docs/core/aop/ataspectj/aopataspectjexample/ApplicationConfiguration.java
|
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.configurationsample.immutable;
import org.springframework.boot.configurationsample.TestNestedConfigurationProperty;
import org.springframework.boot.configurationsample.specific.SimplePojo;
/**
* Inner properties, in immutable format.
*
* @author Stephane Nicoll
*/
public class ImmutableInnerClassProperties {
	// Constructor-bound nested property.
	private final Foo first;
	// Deliberately mutable and exposed under non-standard accessor names
	// (getTheSecond/setTheSecond) to exercise name-mismatch handling.
	private Foo second;
	@TestNestedConfigurationProperty
	private final SimplePojo third;
	// Enum-typed constructor-bound property.
	private final Fourth fourth;
	public ImmutableInnerClassProperties(Foo first, Foo second, SimplePojo third, Fourth fourth) {
		this.first = first;
		this.second = second;
		this.third = third;
		this.fourth = fourth;
	}
	public Foo getFirst() {
		return this.first;
	}
	public Foo getTheSecond() {
		return this.second;
	}
	public void setTheSecond(Foo second) {
		this.second = second;
	}
	public SimplePojo getThird() {
		return this.third;
	}
	public Fourth getFourth() {
		return this.fourth;
	}
	// Nested JavaBean with one level of further nesting (Bar).
	public static class Foo {
		private String name;
		private final Bar bar = new Bar();
		public String getName() {
			return this.name;
		}
		public void setName(String name) {
			this.name = name;
		}
		public Bar getBar() {
			return this.bar;
		}
		public static class Bar {
			private String name;
			public String getName() {
				return this.name;
			}
			public void setName(String name) {
				this.name = name;
			}
		}
	}
	public enum Fourth {
		YES, NO
	}
}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/immutable/ImmutableInnerClassProperties.java
|
import os
import sys
def get_console_type(use_colors=True):
    """Return the console class appropriate for this environment.

    Colors are only used when requested AND both stdout and stderr are
    attached to a terminal; otherwise the plain console class is returned.
    """
    if not (use_colors and sys.stdout.isatty() and sys.stderr.isatty()):
        import lixian_colors_console
        return lixian_colors_console.Console
    import platform
    if platform.system() == 'Windows':
        import lixian_colors_win32
        return lixian_colors_win32.WinConsole
    import lixian_colors_linux
    return lixian_colors_linux.AnsiConsole
# Resolve both console classes once at import time: the auto-detected
# (possibly colored) one and the always-plain one.
console_type = get_console_type()
raw_console_type = get_console_type(False)
def Console(use_colors=True):
    """Instantiate the console class selected for this environment."""
    console_cls = get_console_type(use_colors)
    return console_cls()
def get_softspace(output):
    """Return the (Python 2) ``softspace`` print flag for *output*.

    Falls through console wrappers to the underlying stream; returns 0
    when no softspace state is found.
    """
    _missing = object()
    flag = getattr(output, 'softspace', _missing)
    if flag is not _missing:
        return flag
    import lixian_colors_console
    if isinstance(output, lixian_colors_console.Console):
        # Console wraps another stream; look through it.
        return get_softspace(output.output)
    return 0
class ScopedColors(console_type):
    """Colored console whose __call__ yields a context manager that
    temporarily redirects sys.stdout to this console."""
    def __init__(self, *args):
        console_type.__init__(self, *args)
    def __call__(self):
        console = self
        class Scoped:
            def __enter__(self):
                # Swap sys.stdout for the console, preserving the py2
                # print `softspace` flag across the swap.
                self.stdout = sys.stdout
                softspace = get_softspace(sys.stdout)
                sys.stdout = console
                sys.stdout.softspace = softspace
            def __exit__(self, type, value, traceback):
                # Restore the original stdout, carrying the flag back.
                softspace = get_softspace(sys.stdout)
                sys.stdout = self.stdout
                sys.stdout.softspace = softspace
        return Scoped()
class RawScopedColors(raw_console_type):
    """Plain console counterpart of ScopedColors; its context manager is a
    no-op so callers can use the same `with` pattern without colors."""
    def __init__(self, *args):
        raw_console_type.__init__(self, *args)
    def __call__(self):
        class Scoped:
            def __enter__(self):
                pass
            def __exit__(self, type, value, traceback):
                pass
        return Scoped()
class RootColors:
    """Lazy facade over the scoped console classes.

    Attribute access instantiates a fresh ScopedColors (or RawScopedColors
    when colors are disabled) and delegates to it; calling the object
    returns a new facade with a different color setting.
    """
    def __init__(self, use_colors=True):
        self.use_colors = use_colors
    def __getattr__(self, name):
        delegate = ScopedColors() if self.use_colors else RawScopedColors()
        return getattr(delegate, name)
    def __call__(self, use_colors):
        assert use_colors in (True, False, None), use_colors
        return RootColors(use_colors)
colors = RootColors()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import NotFittedError, check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
                             max_iters,
                             mean_change_tol, cal_sstats, random_state):
    """E-step: update document-topic distribution.
    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Document word matrix.
    exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
        In the literature, this is `exp(E[log(beta)])`.
    doc_topic_prior : float
        Prior of document topic distribution `theta`.
    max_iters : int
        Max number of iterations for updating document topic distribution in
        the E-step.
    mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.
    cal_sstats : boolean
        Parameter that indicate to calculate sufficient statistics or not.
        Set `cal_sstats` to `True` when we need to run M-step.
    random_state : RandomState instance or None
        Parameter that indicate how to initialize document topic distribution.
        Set `random_state` to None will initialize document topic distribution
        to a constant number.
    Returns
    -------
    (doc_topic_distr, suff_stats) :
        `doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. we can calculate `E[log(theta)]`
        from it.
        `suff_stats` is expected sufficient statistics for the M-step.
            When `cal_sstats == False`, this will be None.
    """
    is_sparse_x = sp.issparse(X)
    n_samples, n_features = X.shape
    n_topics = exp_topic_word_distr.shape[0]
    if random_state:
        # Gamma(100, 0.01) has mean 1, so gamma starts near uniform with
        # a little noise.
        doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
    else:
        doc_topic_distr = np.ones((n_samples, n_topics))
    # In the literature, this is `exp(E[log(theta)])`
    exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
    # diff on `component_` (only calculate it when `cal_diff` is True)
    suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
    if is_sparse_x:
        X_data = X.data
        X_indices = X.indices
        X_indptr = X.indptr
    for idx_d in xrange(n_samples):
        # Extract the non-zero word ids and counts for document d.
        if is_sparse_x:
            ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
        else:
            ids = np.nonzero(X[idx_d, :])[0]
            cnts = X[idx_d, ids]
        doc_topic_d = doc_topic_distr[idx_d, :]
        exp_doc_topic_d = exp_doc_topic[idx_d, :]
        exp_topic_word_d = exp_topic_word_distr[:, ids]
        # Iterate between `doc_topic_d` and `norm_phi` until convergence
        for _ in xrange(0, max_iters):
            last_d = doc_topic_d
            # The optimal phi_{dwk} is proportional to
            # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
            doc_topic_d = (doc_topic_prior + exp_doc_topic_d *
                           np.dot(cnts / norm_phi, exp_topic_word_d.T))
            # NOTE(review): the Cython _dirichlet_expectation_1d appears to
            # return the exponentiated expectation directly (no np.exp
            # wrapper here) — confirm against _online_lda.pyx.
            exp_doc_topic_d = _dirichlet_expectation_1d(doc_topic_d)
            if mean_change(last_d, doc_topic_d) < mean_change_tol:
                break
        doc_topic_distr[idx_d, :] = doc_topic_d
        # Contribution of document d to the expected sufficient
        # statistics for the M step.
        if cal_sstats:
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
            suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
    return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
    """Latent Dirichlet Allocation with online variational Bayes algorithm
    Parameters
    ----------
    n_topics : int, optional (default=10)
        Number of topics.
    doc_topic_prior : float, optional (default=None)
        Prior of document topic distribution `theta`. If the value is None,
        defaults to `1 / n_topics`.
        In the literature, this is called `alpha`.
    topic_word_prior : float, optional (default=None)
        Prior of topic word distribution `beta`. If the value is None, defaults
        to `1 / n_topics`.
        In the literature, this is called `eta`.
    learning_method : 'batch' | 'online', default='online'
        Method used to update `_component`. Only used in `fit` method.
        In general, if the data size is large, the online update will be much
        faster than the batch update.
        Valid options::
            'batch': Batch variational Bayes method. Use all training data in
                each EM update.
                Old `components_` will be overwritten in each iteration.
            'online': Online variational Bayes method. In each EM update, use
                mini-batch of training data to update the ``components_``
                variable incrementally. The learning rate is controlled by the
                ``learning_decay`` and the ``learning_offset`` parameters.
    learning_decay : float, optional (default=0.7)
        It is a parameter that control learning rate in the online learning
        method. The value should be set between (0.5, 1.0] to guarantee
        asymptotic convergence. When the value is 0.0 and batch_size is
        ``n_samples``, the update method is same as batch learning. In the
        literature, this is called kappa.
    learning_offset : float, optional (default=10.)
        A (positive) parameter that downweights early iterations in online
        learning. It should be greater than 1.0. In the literature, this is
        called tau_0.
    max_iter : integer, optional (default=10)
        The maximum number of iterations.
    total_samples : int, optional (default=1e6)
        Total number of documents. Only used in the `partial_fit` method.
    batch_size : int, optional (default=128)
        Number of documents to use in each EM iteration. Only used in online
        learning.
    evaluate_every : int optional (default=0)
        How often to evaluate perplexity. Only used in `fit` method.
        set it to 0 or and negative number to not evaluate perplexity in
        training at all. Evaluating perplexity can help you check convergence
        in training process, but it will also increase total training time.
        Evaluating perplexity in every iteration might increase training time
        up to two-fold.
    perp_tol : float, optional (default=1e-1)
        Perplexity tolerance in batch learning. Only used when
        ``evaluate_every`` is greater than 0.
    mean_change_tol : float, optional (default=1e-3)
        Stopping tolerance for updating document topic distribution in E-step.
    max_doc_update_iter : int (default=100)
        Max number of iterations for updating document topic distribution in
        the E-step.
    n_jobs : int, optional (default=1)
        The number of jobs to use in the E-step. If -1, all CPUs are used. For
        ``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
    verbose : int, optional (default=0)
        Verbosity level.
    random_state : int or RandomState instance or None, optional (default=None)
        Pseudo-random number generator seed control.
    Attributes
    ----------
    components_ : array, [n_topics, n_features]
        Topic word distribution. ``components_[i, j]`` represents word j in
        topic `i`. In the literature, this is called lambda.
    n_batch_iter_ : int
        Number of iterations of the EM step.
    n_iter_ : int
        Number of passes over the dataset.
    References
    ----------
    [1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
        David M. Blei, Francis Bach, 2010
    [2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
        Chong Wang, John Paisley, 2013
    [3] Matthew D. Hoffman's onlineldavb code. Link:
        http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
    """
    def __init__(self, n_topics=10, doc_topic_prior=None,
                 topic_word_prior=None, learning_method='online',
                 learning_decay=.7, learning_offset=10., max_iter=10,
                 batch_size=128, evaluate_every=-1, total_samples=1e6,
                 perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
                 n_jobs=1, verbose=0, random_state=None):
        self.n_topics = n_topics
        self.doc_topic_prior = doc_topic_prior
        self.topic_word_prior = topic_word_prior
        self.learning_method = learning_method
        self.learning_decay = learning_decay
        self.learning_offset = learning_offset
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.evaluate_every = evaluate_every
        self.total_samples = total_samples
        self.perp_tol = perp_tol
        self.mean_change_tol = mean_change_tol
        self.max_doc_update_iter = max_doc_update_iter
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.random_state = random_state
    def _check_params(self):
        """Check model parameters."""
        if self.n_topics <= 0:
            raise ValueError("Invalid 'n_topics' parameter: %r"
                             % self.n_topics)
        if self.total_samples <= 0:
            raise ValueError("Invalid 'total_samples' parameter: %r"
                             % self.total_samples)
        if self.learning_offset < 0:
            raise ValueError("Invalid 'learning_offset' parameter: %r"
                             % self.learning_offset)
        if self.learning_method not in ("batch", "online"):
            raise ValueError("Invalid 'learning_method' parameter: %r"
                             % self.learning_method)
    def _init_latent_vars(self, n_features):
        """Initialize latent variables."""
        self.random_state_ = check_random_state(self.random_state)
        self.n_batch_iter_ = 1
        self.n_iter_ = 0
        # Both priors default to a symmetric 1/n_topics when not given.
        if self.doc_topic_prior is None:
            self.doc_topic_prior_ = 1. / self.n_topics
        else:
            self.doc_topic_prior_ = self.doc_topic_prior
        if self.topic_word_prior is None:
            self.topic_word_prior_ = 1. / self.n_topics
        else:
            self.topic_word_prior_ = self.topic_word_prior
        # Gamma(100, 1/100) has mean 1: lambda starts near uniform with noise.
        init_gamma = 100.
        init_var = 1. / init_gamma
        # In the literature, this is called `lambda`
        self.components_ = self.random_state_.gamma(
            init_gamma, init_var, (self.n_topics, n_features))
        # In the literature, this is `exp(E[log(beta)])`
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_))
    def _e_step(self, X, cal_sstats, random_init):
        """E-step in EM update.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        cal_sstats : boolean
            Parameter that indicate whether to calculate sufficient statistics
            or not. Set ``cal_sstats`` to True when we need to run M-step.
        random_init : boolean
            Parameter that indicate whether to initialize document topic
            distribution randomly in the E-step. Set it to True in training
            steps.
        Returns
        -------
        (doc_topic_distr, suff_stats) :
            `doc_topic_distr` is unnormalized topic distribution for each
            document. In the literature, this is called `gamma`.
            `suff_stats` is expected sufficient statistics for the M-step.
            When `cal_sstats == False`, it will be None.
        """
        # Run e-step in parallel
        n_jobs = _get_n_jobs(self.n_jobs)
        random_state = self.random_state_ if random_init else None
        # Documents are independent given lambda, so the per-document update
        # can be split into even slices across workers.
        results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_update_doc_distribution)(X[idx_slice, :],
                                              self.exp_dirichlet_component_,
                                              self.doc_topic_prior_,
                                              self.max_doc_update_iter,
                                              self.mean_change_tol, cal_sstats,
                                              random_state)
            for idx_slice in gen_even_slices(X.shape[0], n_jobs))
        # merge result
        doc_topics, sstats_list = zip(*results)
        doc_topic_distr = np.vstack(doc_topics)
        if cal_sstats:
            # This step finishes computing the sufficient statistics for the
            # M-step.
            suff_stats = np.zeros(self.components_.shape)
            for sstats in sstats_list:
                suff_stats += sstats
            suff_stats *= self.exp_dirichlet_component_
        else:
            suff_stats = None
        return (doc_topic_distr, suff_stats)
    def _em_step(self, X, total_samples, batch_update):
        """EM update for 1 iteration.
        update `_component` by batch VB or online VB.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        total_samples : integer
            Total number of documents. It is only used when
            batch_update is `False`.
        batch_update : boolean
            Parameter that controls updating method.
            `True` for batch learning, `False` for online learning.
        Returns
        -------
        doc_topic_distr : array, shape=(n_samples, n_topics)
            Unnormalized document topic distribution.
        """
        # E-step
        _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True)
        # M-step
        if batch_update:
            self.components_ = self.topic_word_prior_ + suff_stats
        else:
            # online update
            # In the literature, the weight is `rho`:
            # rho = (tau_0 + t) ** -kappa, decaying with the batch count.
            weight = np.power(self.learning_offset + self.n_batch_iter_,
                              -self.learning_decay)
            # Scale the mini-batch statistics up to the full corpus size.
            doc_ratio = float(total_samples) / X.shape[0]
            self.components_ *= (1 - weight)
            self.components_ += (weight * (self.topic_word_prior_
                                           + doc_ratio * suff_stats))
        # update `component_` related variables
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_))
        self.n_batch_iter_ += 1
        return
    def _check_non_neg_array(self, X, whom):
        """check X format
        check X format and make sure no negative value in X.
        Parameters
        ----------
        X : array-like or sparse matrix
        """
        X = check_array(X, accept_sparse='csr')
        check_non_negative(X, whom)
        return X
    def partial_fit(self, X, y=None):
        """Online VB with Mini-Batch update.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        Returns
        -------
        self
        """
        self._check_params()
        X = self._check_non_neg_array(X,
                                      "LatentDirichletAllocation.partial_fit")
        n_samples, n_features = X.shape
        batch_size = self.batch_size
        # initialize parameters or check
        if not hasattr(self, 'components_'):
            self._init_latent_vars(n_features)
        if n_features != self.components_.shape[1]:
            raise ValueError(
                "The provided data has %d dimensions while "
                "the model was trained with feature size %d." %
                (n_features, self.components_.shape[1]))
        for idx_slice in gen_batches(n_samples, batch_size):
            self._em_step(X[idx_slice, :], total_samples=self.total_samples,
                          batch_update=False)
        return self
    def fit(self, X, y=None):
        """Learn model for the data X with variational Bayes method.
        When `learning_method` is 'online', use mini-batch update.
        Otherwise, use batch update.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        Returns
        -------
        self
        """
        self._check_params()
        X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
        n_samples, n_features = X.shape
        max_iter = self.max_iter
        evaluate_every = self.evaluate_every
        learning_method = self.learning_method
        batch_size = self.batch_size
        # initialize parameters
        self._init_latent_vars(n_features)
        # change to perplexity later
        last_bound = None
        for i in xrange(max_iter):
            if learning_method == 'online':
                for idx_slice in gen_batches(n_samples, batch_size):
                    self._em_step(X[idx_slice, :], total_samples=n_samples,
                                  batch_update=False)
            else:
                # batch update
                self._em_step(X, total_samples=n_samples, batch_update=True)
            # check perplexity
            # Early stopping on perplexity change when evaluate_every > 0;
            # evaluation requires a full extra E-step over X.
            if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
                doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
                                                   random_init=False)
                bound = self.perplexity(X, doc_topics_distr,
                                        sub_sampling=False)
                if self.verbose:
                    print('iteration: %d, perplexity: %.4f' % (i + 1, bound))
                if last_bound and abs(last_bound - bound) < self.perp_tol:
                    break
                last_bound = bound
            self.n_iter_ += 1
        return self
    def transform(self, X):
        """Transform data X according to the fitted model.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        Returns
        -------
        doc_topic_distr : shape=(n_samples, n_topics)
            Document topic distribution for X.
        """
        if not hasattr(self, 'components_'):
            raise NotFittedError("no 'components_' attribute in model."
                                 " Please fit model first.")
        # make sure feature size is the same in fitted model and in X
        X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
        n_samples, n_features = X.shape
        if n_features != self.components_.shape[1]:
            raise ValueError(
                "The provided data has %d dimensions while "
                "the model was trained with feature size %d." %
                (n_features, self.components_.shape[1]))
        doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
                                          random_init=False)
        return doc_topic_distr
    def _approx_bound(self, X, doc_topic_distr, sub_sampling):
        """Estimate the variational bound.
        Estimate the variational bound over "all documents" using only the
        documents passed in as X. Since log-likelihood of each word cannot
        be computed directly, we use this bound to estimate it.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        doc_topic_distr : array, shape=(n_samples, n_topics)
            Document topic distribution. In the literature, this is called
            gamma.
        sub_sampling : boolean, optional, (default=False)
            Compensate for subsampling of documents.
            It is used in calculate bound in online learning.
        Returns
        -------
        score : float
        """
        def _loglikelihood(prior, distr, dirichlet_distr, size):
            # calculate log-likelihood
            score = np.sum((prior - distr) * dirichlet_distr)
            score += np.sum(gammaln(distr) - gammaln(prior))
            score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
            return score
        is_sparse_x = sp.issparse(X)
        n_samples, n_topics = doc_topic_distr.shape
        n_features = self.components_.shape[1]
        score = 0
        dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
        dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
        doc_topic_prior = self.doc_topic_prior_
        topic_word_prior = self.topic_word_prior_
        if is_sparse_x:
            X_data = X.data
            X_indices = X.indices
            X_indptr = X.indptr
        # E[log p(docs | theta, beta)]
        for idx_d in xrange(0, n_samples):
            if is_sparse_x:
                ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
                cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            else:
                ids = np.nonzero(X[idx_d, :])[0]
                cnts = X[idx_d, ids]
            temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
                    + dirichlet_component_[:, ids])
            # NOTE(review): relies on the old sklearn extmath logsumexp
            # reducing over axis 0 by default — confirm before porting.
            norm_phi = logsumexp(temp)
            score += np.dot(cnts, norm_phi)
        # compute E[log p(theta | alpha) - log q(theta | gamma)]
        score += _loglikelihood(doc_topic_prior, doc_topic_distr,
                                dirichlet_doc_topic, self.n_topics)
        # Compensate for the subsampling of the population of documents
        if sub_sampling:
            doc_ratio = float(self.total_samples) / n_samples
            score *= doc_ratio
        # E[log p(beta | eta) - log q (beta | lambda)]
        score += _loglikelihood(topic_word_prior, self.components_,
                                dirichlet_component_, n_features)
        return score
    def score(self, X, y=None):
        """Calculate approximate log-likelihood as score.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        Returns
        -------
        score : float
            Use approximate bound as score.
        """
        X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
        doc_topic_distr = self.transform(X)
        score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
        return score
    def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
        """Calculate approximate perplexity for data X.
        Perplexity is defined as exp(-1. * log-likelihood per word)
        Parameters
        ----------
        X : array-like or sparse matrix, [n_samples, n_features]
            Document word matrix.
        doc_topic_distr : None or array, shape=(n_samples, n_topics)
            Document topic distribution.
            If it is None, it will be generated by applying transform on X.
        Returns
        -------
        score : float
            Perplexity score.
        """
        if not hasattr(self, 'components_'):
            raise NotFittedError("no 'components_' attribute in model."
                                 " Please fit model first.")
        X = self._check_non_neg_array(X,
                                      "LatentDirichletAllocation.perplexity")
        if doc_topic_distr is None:
            doc_topic_distr = self.transform(X)
        else:
            # A caller-supplied gamma must match X and the fitted model.
            n_samples, n_topics = doc_topic_distr.shape
            if n_samples != X.shape[0]:
                raise ValueError("Number of samples in X and doc_topic_distr"
                                 " do not match.")
            if n_topics != self.n_topics:
                raise ValueError("Number of topics does not match.")
        current_samples = X.shape[0]
        bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
        if sub_sampling:
            word_cnt = X.sum() * (float(self.total_samples) / current_samples)
        else:
            word_cnt = X.sum()
        perword_bound = bound / word_cnt
        return np.exp(-1.0 * perword_bound)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package logical
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/hashicorp/errwrap"
metrics "github.com/hashicorp/go-metrics/compat"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/sdk/helper/consts"
)
// RespondErrorCommon pulls most of the functionality from http's
// respondErrorCommon and some of http's handleLogical and makes it available
// to both the http package and elsewhere.
func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
	// Success path first: no error and no error-flagged response.
	if err == nil && (resp == nil || !resp.IsError()) {
		switch {
		case req.Operation == ReadOperation || req.Operation == HeaderOperation:
			if resp == nil {
				return http.StatusNotFound, nil
			}
		// Basically: if we have empty "keys" or no keys at all, 404. This
		// provides consistency with GET.
		case req.Operation == ListOperation && (resp == nil || resp.WrapInfo == nil):
			if resp == nil {
				return http.StatusNotFound, nil
			}
			if len(resp.Data) == 0 {
				// Warnings alone are worth returning as a 2xx body.
				if len(resp.Warnings) > 0 {
					return 0, nil
				}
				return http.StatusNotFound, nil
			}
			keysRaw, ok := resp.Data["keys"]
			if !ok || keysRaw == nil {
				// If we don't have keys but have other data, return as-is
				if len(resp.Data) > 0 || len(resp.Warnings) > 0 {
					return 0, nil
				}
				return http.StatusNotFound, nil
			}
			// "keys" may arrive as []interface{} (e.g. after JSON decode)
			// or as a native []string; anything else is a server bug.
			var keys []string
			switch keysRaw.(type) {
			case []interface{}:
				keys = make([]string, len(keysRaw.([]interface{})))
				for i, el := range keysRaw.([]interface{}) {
					s, ok := el.(string)
					if !ok {
						return http.StatusInternalServerError, nil
					}
					keys[i] = s
				}
			case []string:
				keys = keysRaw.([]string)
			default:
				return http.StatusInternalServerError, nil
			}
			if len(keys) == 0 {
				return http.StatusNotFound, nil
			}
		}
		// Status 0 means "no override": the caller picks the code.
		return 0, nil
	}
	// Replication-coded errors carry their own HTTP status code.
	if errwrap.ContainsType(err, new(ReplicationCodedError)) {
		var allErrors error
		var codedErr *ReplicationCodedError
		errwrap.Walk(err, func(inErr error) {
			// The Walk function does not just traverse leaves, and execute the
			// callback function on the entire error first. So, if the error is
			// of type multierror.Error, we may want to skip storing the entire
			// error first to avoid adding duplicate errors when walking down
			// the leaf errors
			if _, ok := inErr.(*multierror.Error); ok {
				return
			}
			newErr, ok := inErr.(*ReplicationCodedError)
			if ok {
				codedErr = newErr
			} else {
				// if the error is of type fmt.wrapError which is typically
				// made by calling fmt.Errorf("... %w", err), allErrors will
				// contain duplicated error messages
				allErrors = multierror.Append(allErrors, inErr)
			}
		})
		if allErrors != nil {
			return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors)
		}
		return codedErr.Code, errors.New(codedErr.Msg)
	}
	// Start out with internal server error since in most of these cases there
	// won't be a response so this won't be overridden
	statusCode := http.StatusInternalServerError
	// If we actually have a response, start out with bad request
	if resp != nil {
		statusCode = http.StatusBadRequest
	}
	// Now, check the error itself; if it has a specific logical error, set the
	// appropriate code
	if err != nil {
		switch {
		case errwrap.Contains(err, consts.ErrOverloaded.Error()):
			statusCode = http.StatusServiceUnavailable
		case errwrap.ContainsType(err, new(StatusBadRequest)):
			statusCode = http.StatusBadRequest
		case errwrap.Contains(err, ErrPermissionDenied.Error()):
			statusCode = http.StatusForbidden
		case errwrap.Contains(err, consts.ErrInvalidWrappingToken.Error()):
			statusCode = http.StatusBadRequest
		case errwrap.Contains(err, ErrUnsupportedOperation.Error()):
			statusCode = http.StatusMethodNotAllowed
		case errwrap.Contains(err, ErrUnsupportedPath.Error()):
			statusCode = http.StatusNotFound
		case errwrap.Contains(err, ErrInvalidRequest.Error()):
			statusCode = http.StatusBadRequest
		case errwrap.Contains(err, ErrUpstreamRateLimited.Error()):
			statusCode = http.StatusBadGateway
		case errwrap.Contains(err, ErrRateLimitQuotaExceeded.Error()):
			statusCode = http.StatusTooManyRequests
		case errwrap.Contains(err, ErrLeaseCountQuotaExceeded.Error()):
			statusCode = http.StatusTooManyRequests
		case errwrap.Contains(err, ErrMissingRequiredState.Error()):
			statusCode = http.StatusPreconditionFailed
		case errwrap.Contains(err, ErrPathFunctionalityRemoved.Error()):
			statusCode = http.StatusNotFound
		case errwrap.Contains(err, ErrRelativePath.Error()):
			statusCode = http.StatusBadRequest
		case errwrap.Contains(err, ErrInvalidCredentials.Error()):
			statusCode = http.StatusBadRequest
		case errors.Is(err, ErrNotFound):
			statusCode = http.StatusNotFound
		}
	}
	// NOTE(review): assumes Response.Error is nil-receiver-safe (returns nil
	// when resp is nil) — confirm in sdk/logical/response.go.
	if respErr := resp.Error(); respErr != nil {
		// A response-carried error replaces err in the return value.
		err = fmt.Errorf("%s", respErr.Error())
		// Don't let other error codes override the overloaded status code
		if strings.Contains(respErr.Error(), consts.ErrOverloaded.Error()) {
			statusCode = http.StatusServiceUnavailable
		}
	}
	return statusCode, err
}
// AdjustErrorStatusCode adjusts the status that will be sent in error
// conditions in a way that can be shared across http's respondError and other
// locations.
func AdjustErrorStatusCode(status *int, err error) {
	// Recurse into multierror wrappers so every nested error is considered.
	if merr, ok := err.(*multierror.Error); ok {
		for _, nested := range merr.Errors {
			AdjustErrorStatusCode(status, nested)
		}
	}

	// Overloaded, sealed, and API-locked conditions all map to 503.
	for _, msg := range []string{
		consts.ErrOverloaded.Error(),
		consts.ErrSealed.Error(),
		consts.ErrAPILocked.Error(),
	} {
		if errwrap.Contains(err, msg) {
			*status = http.StatusServiceUnavailable
		}
	}

	// Oversized request bodies get 413.
	if errwrap.Contains(err, "http: request body too large") {
		*status = http.StatusRequestEntityTooLarge
	}

	// An HTTPCodedError carries an explicit code and takes precedence.
	if coded, ok := err.(HTTPCodedError); ok {
		*status = coded.Code()
	}
}
// RespondError writes err (if any) to w as a JSON error response with the
// given HTTP status code, after adjusting the code for well-known error
// conditions. The response status code metric is incremented on return.
func RespondError(w http.ResponseWriter, status int, err error) {
	AdjustErrorStatusCode(&status, err)
	defer IncrementResponseStatusCodeMetric(status)

	type ErrorResponse struct {
		Errors []string `json:"errors"`
	}
	body := &ErrorResponse{Errors: make([]string, 0, 1)}
	if err != nil {
		body.Errors = append(body.Errors, err.Error())
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	json.NewEncoder(w).Encode(body)
}
// RespondErrorAndData is like RespondError but also attaches the given data
// payload to the JSON response body under the "data" key.
func RespondErrorAndData(w http.ResponseWriter, status int, data interface{}, err error) {
	AdjustErrorStatusCode(&status, err)
	defer IncrementResponseStatusCodeMetric(status)

	type ErrorAndDataResponse struct {
		Errors []string    `json:"errors"`
		Data   interface{} `json:"data"`
	}
	body := &ErrorAndDataResponse{Errors: make([]string, 0, 1), Data: data}
	if err != nil {
		body.Errors = append(body.Errors, err.Error())
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	json.NewEncoder(w).Encode(body)
}
// IncrementResponseStatusCodeMetric bumps the core.response_status_code
// counter, labeled with both the exact status code (e.g. "404") and its
// class (e.g. "4xx").
func IncrementResponseStatusCodeMetric(statusCode int) {
	code := strconv.Itoa(statusCode)
	class := fmt.Sprintf("%cxx", code[0])
	labels := []metrics.Label{
		{Name: "code", Value: code},
		{Name: "type", Value: class},
	}
	metrics.IncrCounterWithLabels([]string{"core", "response_status_code"}, 1, labels)
}
|
go
|
github
|
https://github.com/hashicorp/vault
|
sdk/logical/response_util.go
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Packaging-time placeholders, expanded when the release is cut.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
# End-to-end check that the GNU-make-style "what if" options are recognized
# by the SCons option parser but reported as not yet implemented.
test = TestSCons.TestSCons()
# An empty SConstruct suffices: only option parsing is exercised here.
test.write('SConstruct', "")
test.option_not_yet_implemented('-W', 'foo .')
test.option_not_yet_implemented('--what-if', '=foo .')
test.option_not_yet_implemented('--new-file', '=foo .')
test.option_not_yet_implemented('--assume-new', '=foo .')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
import asyncio
import logging
import platform
import re
import signal
import subprocess
import sys
import warnings
from abc import ABC, abstractmethod
from collections.abc import Generator
from pathlib import Path
from typing import Any
import pytest
from packaging.version import parse as parse_version
from pexpect.popen_spawn import PopenSpawn
from twisted.internet.defer import Deferred
from w3lib import __version__ as w3lib_version
from zope.interface.exceptions import MultipleInvalid
import scrapy
from scrapy import Spider
from scrapy.crawler import (
AsyncCrawlerProcess,
AsyncCrawlerRunner,
Crawler,
CrawlerProcess,
CrawlerRunner,
)
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.extensions.throttle import AutoThrottle
from scrapy.settings import Settings, default_settings
from scrapy.utils.asyncio import call_later
from scrapy.utils.defer import deferred_from_coro, maybe_deferred_to_future
from scrapy.utils.log import (
_uninstall_scrapy_root_handler,
configure_logging,
get_scrapy_root_handler,
)
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler, get_reactor_settings
from tests.mockserver.http import MockServer
from tests.utils import get_script_run_env
from tests.utils.decorators import coroutine_test, inline_callbacks_test
BASE_SETTINGS: dict[str, Any] = {}


def get_raw_crawler(spidercls=None, settings_dict=None):
    """Build a Crawler directly via ``__init__``, bypassing the extra setup
    that :func:`get_crawler` performs."""
    merged = Settings()
    merged.setdict(get_reactor_settings())
    merged.setdict(settings_dict or {})
    return Crawler(spidercls or DefaultSpider, merged)
class TestBaseCrawler:
    """Shared assertion helpers for crawler/runner/process test classes."""

    def assertOptionIsDefault(self, settings, key):
        # The option must be untouched, i.e. equal to the value shipped in
        # scrapy.settings.default_settings.
        assert isinstance(settings, Settings)
        expected = getattr(default_settings, key)
        assert settings[key] == expected
class TestCrawler(TestBaseCrawler):
    """Tests for Crawler: settings population, crawl-lifecycle restrictions,
    and the component-lookup helpers (get_addon, get_downloader_middleware,
    get_extension, get_item_pipeline, get_spider_middleware)."""

    def test_populate_spidercls_settings(self):
        # Spider custom_settings must override project-level settings, and
        # freezing the crawler's copy must not freeze the caller's object.
        spider_settings = {"TEST1": "spider", "TEST2": "spider"}
        project_settings = {
            **BASE_SETTINGS,
            "TEST1": "project",
            "TEST3": "project",
            **get_reactor_settings(),
        }

        class CustomSettingsSpider(DefaultSpider):
            custom_settings = spider_settings

        settings = Settings()
        settings.setdict(project_settings, priority="project")
        crawler = Crawler(CustomSettingsSpider, settings)
        crawler._apply_settings()
        # TEST1 is defined in both places; the spider value must win.
        assert crawler.settings.get("TEST1") == "spider"
        assert crawler.settings.get("TEST2") == "spider"
        assert crawler.settings.get("TEST3") == "project"
        assert not settings.frozen
        assert crawler.settings.frozen

    def test_crawler_accepts_dict(self):
        crawler = get_crawler(DefaultSpider, {"foo": "bar"})
        assert crawler.settings["foo"] == "bar"
        self.assertOptionIsDefault(crawler.settings, "RETRY_ENABLED")

    def test_crawler_accepts_None(self):
        # Constructing a Crawler without settings is deprecated but must
        # still produce default settings.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ScrapyDeprecationWarning)
            crawler = Crawler(DefaultSpider)
        self.assertOptionIsDefault(crawler.settings, "RETRY_ENABLED")

    def test_crawler_rejects_spider_objects(self):
        with pytest.raises(ValueError, match="spidercls argument must be a class"):
            Crawler(DefaultSpider())

    @inline_callbacks_test
    def test_crawler_crawl_twice_seq_unsupported(self):
        # A Crawler instance is single-use: a second sequential crawl raises.
        crawler = get_raw_crawler(NoRequestsSpider, BASE_SETTINGS)
        yield crawler.crawl()
        with pytest.raises(RuntimeError, match="more than once on the same instance"):
            yield crawler.crawl()

    @coroutine_test
    async def test_crawler_crawl_async_twice_seq_unsupported(self):
        crawler = get_raw_crawler(NoRequestsSpider, BASE_SETTINGS)
        await crawler.crawl_async()
        with pytest.raises(RuntimeError, match="more than once on the same instance"):
            await crawler.crawl_async()

    @inline_callbacks_test
    def test_crawler_crawl_twice_parallel_unsupported(self):
        crawler = get_raw_crawler(NoRequestsSpider, BASE_SETTINGS)
        # Start both crawls before awaiting either, so they overlap.
        d1 = crawler.crawl()
        d2 = crawler.crawl()
        yield d1
        with pytest.raises(RuntimeError, match="Crawling already taking place"):
            yield d2

    @pytest.mark.only_asyncio
    @coroutine_test
    async def test_crawler_crawl_async_twice_parallel_unsupported(self):
        crawler = get_raw_crawler(NoRequestsSpider, BASE_SETTINGS)
        t1 = asyncio.create_task(crawler.crawl_async())
        t2 = asyncio.create_task(crawler.crawl_async())
        await t1
        with pytest.raises(RuntimeError, match="Crawling already taking place"):
            await t2

    def test_get_addon(self):
        class ParentAddon:
            pass

        class TrackingAddon(ParentAddon):
            # Records every created instance so the test can check identity.
            instances = []

            def __init__(self):
                TrackingAddon.instances.append(self)

            def update_settings(self, settings):
                pass

        settings = {
            **BASE_SETTINGS,
            "ADDONS": {
                TrackingAddon: 0,
            },
        }
        crawler = get_crawler(settings_dict=settings)
        assert len(TrackingAddon.instances) == 1
        expected = TrackingAddon.instances[-1]
        # Exact class: found.
        addon = crawler.get_addon(TrackingAddon)
        assert addon == expected
        # Unrelated class: not found.
        addon = crawler.get_addon(DefaultSpider)
        assert addon is None
        # Parent class: the installed subclass instance matches.
        addon = crawler.get_addon(ParentAddon)
        assert addon == expected

        class ChildAddon(TrackingAddon):
            pass

        # Subclass of the installed class: not a match.
        addon = crawler.get_addon(ChildAddon)
        assert addon is None

    @inline_callbacks_test
    def test_get_downloader_middleware(self):
        class ParentDownloaderMiddleware:
            pass

        class TrackingDownloaderMiddleware(ParentDownloaderMiddleware):
            # Records every created instance so the test can check identity.
            instances = []

            def __init__(self):
                TrackingDownloaderMiddleware.instances.append(self)

        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                return cls(crawler=crawler)

            def __init__(self, crawler, **kwargs: Any):
                super().__init__(**kwargs)
                self.crawler = crawler

            async def start(self):
                # NOTE: `crawler` is a closure over the test function's local,
                # so each rebinding below changes which crawler is queried;
                # the lookup target class is injected via MySpider.cls.
                MySpider.result = crawler.get_downloader_middleware(MySpider.cls)
                return
                yield

        settings = {
            **BASE_SETTINGS,
            "DOWNLOADER_MIDDLEWARES": {
                TrackingDownloaderMiddleware: 0,
            },
        }
        # Exact class: found.
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = TrackingDownloaderMiddleware
        yield crawler.crawl()
        assert len(TrackingDownloaderMiddleware.instances) == 1
        assert MySpider.result == TrackingDownloaderMiddleware.instances[-1]
        # Unrelated class: not found.
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = DefaultSpider
        yield crawler.crawl()
        assert MySpider.result is None
        # Parent class: the installed subclass instance matches.
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ParentDownloaderMiddleware
        yield crawler.crawl()
        assert MySpider.result == TrackingDownloaderMiddleware.instances[-1]

        class ChildDownloaderMiddleware(TrackingDownloaderMiddleware):
            pass

        # Subclass of the installed class: not a match.
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ChildDownloaderMiddleware
        yield crawler.crawl()
        assert MySpider.result is None

    def test_get_downloader_middleware_not_crawling(self):
        crawler = get_raw_crawler(settings_dict=BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            crawler.get_downloader_middleware(DefaultSpider)

    @inline_callbacks_test
    def test_get_downloader_middleware_no_engine(self):
        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                # Called before the engine exists; the lookup must raise.
                try:
                    crawler.get_downloader_middleware(DefaultSpider)
                except Exception as e:
                    MySpider.result = e
                    raise

        crawler = get_raw_crawler(MySpider, BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            yield crawler.crawl()

    @inline_callbacks_test
    def test_get_extension(self):
        # Same pattern as test_get_downloader_middleware, for extensions.
        class ParentExtension:
            pass

        class TrackingExtension(ParentExtension):
            instances = []

            def __init__(self):
                TrackingExtension.instances.append(self)

        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                return cls(crawler=crawler)

            def __init__(self, crawler, **kwargs: Any):
                super().__init__(**kwargs)
                self.crawler = crawler

            async def start(self):
                MySpider.result = crawler.get_extension(MySpider.cls)
                return
                yield

        settings = {
            **BASE_SETTINGS,
            "EXTENSIONS": {
                TrackingExtension: 0,
            },
        }
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = TrackingExtension
        yield crawler.crawl()
        assert len(TrackingExtension.instances) == 1
        assert MySpider.result == TrackingExtension.instances[-1]
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = DefaultSpider
        yield crawler.crawl()
        assert MySpider.result is None
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ParentExtension
        yield crawler.crawl()
        assert MySpider.result == TrackingExtension.instances[-1]

        class ChildExtension(TrackingExtension):
            pass

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ChildExtension
        yield crawler.crawl()
        assert MySpider.result is None

    def test_get_extension_not_crawling(self):
        crawler = get_raw_crawler(settings_dict=BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            crawler.get_extension(DefaultSpider)

    @inline_callbacks_test
    def test_get_extension_no_engine(self):
        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                try:
                    crawler.get_extension(DefaultSpider)
                except Exception as e:
                    MySpider.result = e
                    raise

        crawler = get_raw_crawler(MySpider, BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            yield crawler.crawl()

    @inline_callbacks_test
    def test_get_item_pipeline(self):
        # Same pattern as test_get_downloader_middleware, for item pipelines.
        class ParentItemPipeline:
            pass

        class TrackingItemPipeline(ParentItemPipeline):
            instances = []

            def __init__(self):
                TrackingItemPipeline.instances.append(self)

        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                return cls(crawler=crawler)

            def __init__(self, crawler, **kwargs: Any):
                super().__init__(**kwargs)
                self.crawler = crawler

            async def start(self):
                MySpider.result = crawler.get_item_pipeline(MySpider.cls)
                return
                yield

        settings = {
            **BASE_SETTINGS,
            "ITEM_PIPELINES": {
                TrackingItemPipeline: 0,
            },
        }
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = TrackingItemPipeline
        yield crawler.crawl()
        assert len(TrackingItemPipeline.instances) == 1
        assert MySpider.result == TrackingItemPipeline.instances[-1]
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = DefaultSpider
        yield crawler.crawl()
        assert MySpider.result is None
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ParentItemPipeline
        yield crawler.crawl()
        assert MySpider.result == TrackingItemPipeline.instances[-1]

        class ChildItemPipeline(TrackingItemPipeline):
            pass

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ChildItemPipeline
        yield crawler.crawl()
        assert MySpider.result is None

    def test_get_item_pipeline_not_crawling(self):
        crawler = get_raw_crawler(settings_dict=BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            crawler.get_item_pipeline(DefaultSpider)

    @inline_callbacks_test
    def test_get_item_pipeline_no_engine(self):
        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                try:
                    crawler.get_item_pipeline(DefaultSpider)
                except Exception as e:
                    MySpider.result = e
                    raise

        crawler = get_raw_crawler(MySpider, BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            yield crawler.crawl()

    @inline_callbacks_test
    def test_get_spider_middleware(self):
        # Same pattern as test_get_downloader_middleware, for spider middleware.
        class ParentSpiderMiddleware:
            pass

        class TrackingSpiderMiddleware(ParentSpiderMiddleware):
            instances = []

            def __init__(self):
                TrackingSpiderMiddleware.instances.append(self)

        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                return cls(crawler=crawler)

            def __init__(self, crawler, **kwargs: Any):
                super().__init__(**kwargs)
                self.crawler = crawler

            async def start(self):
                MySpider.result = crawler.get_spider_middleware(MySpider.cls)
                return
                yield

        settings = {
            **BASE_SETTINGS,
            "SPIDER_MIDDLEWARES": {
                TrackingSpiderMiddleware: 0,
            },
        }
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = TrackingSpiderMiddleware
        yield crawler.crawl()
        assert len(TrackingSpiderMiddleware.instances) == 1
        assert MySpider.result == TrackingSpiderMiddleware.instances[-1]
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = DefaultSpider
        yield crawler.crawl()
        assert MySpider.result is None
        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ParentSpiderMiddleware
        yield crawler.crawl()
        assert MySpider.result == TrackingSpiderMiddleware.instances[-1]

        class ChildSpiderMiddleware(TrackingSpiderMiddleware):
            pass

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ChildSpiderMiddleware
        yield crawler.crawl()
        assert MySpider.result is None

    def test_get_spider_middleware_not_crawling(self):
        crawler = get_raw_crawler(settings_dict=BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            crawler.get_spider_middleware(DefaultSpider)

    @inline_callbacks_test
    def test_get_spider_middleware_no_engine(self):
        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                try:
                    crawler.get_spider_middleware(DefaultSpider)
                except Exception as e:
                    MySpider.result = e
                    raise

        crawler = get_raw_crawler(MySpider, BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            yield crawler.crawl()
class TestSpiderSettings:
    def test_spider_custom_settings(self):
        """A spider's custom_settings must reach extension initialization."""

        class MySpider(scrapy.Spider):
            name = "spider"
            custom_settings = {"AUTOTHROTTLE_ENABLED": True}

        crawler = get_crawler(MySpider)
        extension_classes = {type(ext) for ext in crawler.extensions.middlewares}
        assert AutoThrottle in extension_classes
class TestCrawlerLogging:
    """Tests for how crawler creation configures the scrapy root log handler
    and per-spider LOG_* settings."""

    def test_no_root_handler_installed(self):
        # With no root handler present, get_crawler must not install one.
        handler = get_scrapy_root_handler()
        if handler is not None:
            logging.root.removeHandler(handler)

        class MySpider(scrapy.Spider):
            name = "spider"

        get_crawler(MySpider)
        assert get_scrapy_root_handler() is None

    @coroutine_test
    async def test_spider_custom_settings_log_level(self, tmp_path):
        log_file = Path(tmp_path, "log.txt")
        # Pre-existing content lets us verify append behavior below.
        log_file.write_text("previous message\n", encoding="utf-8")
        info_count = None

        class MySpider(scrapy.Spider):
            name = "spider"
            custom_settings = {
                "LOG_LEVEL": "INFO",
                "LOG_FILE": str(log_file),
            }

            async def start(self):
                # `crawler` is a closure over the test function's local;
                # count only the INFO records emitted inside this method.
                info_count_start = crawler.stats.get_value("log_count/INFO")
                logging.debug("debug message")  # noqa: LOG015
                logging.info("info message")  # noqa: LOG015
                logging.warning("warning message")  # noqa: LOG015
                logging.error("error message")  # noqa: LOG015
                nonlocal info_count
                info_count = (
                    crawler.stats.get_value("log_count/INFO") - info_count_start
                )
                return
                yield

        try:
            configure_logging()
            assert get_scrapy_root_handler().level == logging.DEBUG
            # Creating the crawler must apply the spider's LOG_LEVEL=INFO.
            crawler = get_crawler(MySpider)
            assert get_scrapy_root_handler().level == logging.INFO
            await crawler.crawl_async()
        finally:
            # Always restore global logging state for other tests.
            _uninstall_scrapy_root_handler()
        logged = log_file.read_text(encoding="utf-8")
        # Default is append mode, so the pre-existing line survives.
        assert "previous message" in logged
        assert "debug message" not in logged
        assert "info message" in logged
        assert "warning message" in logged
        assert "error message" in logged
        assert crawler.stats.get_value("log_count/ERROR") == 1
        assert crawler.stats.get_value("log_count/WARNING") == 1
        assert info_count == 1
        assert crawler.stats.get_value("log_count/DEBUG", 0) == 0

    def test_spider_custom_settings_log_append(self, tmp_path):
        log_file = Path(tmp_path, "log.txt")
        log_file.write_text("previous message\n", encoding="utf-8")

        class MySpider(scrapy.Spider):
            name = "spider"
            custom_settings = {
                "LOG_FILE": str(log_file),
                "LOG_FILE_APPEND": False,
            }

        try:
            configure_logging()
            get_crawler(MySpider)
            logging.debug("debug message")  # noqa: LOG015
        finally:
            _uninstall_scrapy_root_handler()
        logged = log_file.read_text(encoding="utf-8")
        # LOG_FILE_APPEND=False must truncate the existing file.
        assert "previous message" not in logged
        assert "debug message" in logged
class SpiderLoaderWithWrongInterface:
    # Deliberately implements none of the expected spider-loader methods;
    # used as SPIDER_LOADER_CLASS to check that runner construction raises
    # MultipleInvalid (presumably via zope.interface verification — see the
    # tests that use it).
    def unneeded_method(self):
        pass
class TestCrawlerRunner(TestBaseCrawler):
    def test_spider_manager_verify_interface(self):
        # A SPIDER_LOADER_CLASS without the required interface must be
        # rejected when the runner is constructed.
        bad_settings = Settings(
            {"SPIDER_LOADER_CLASS": SpiderLoaderWithWrongInterface}
        )
        with pytest.raises(MultipleInvalid):
            CrawlerRunner(bad_settings)

    def test_crawler_runner_accepts_dict(self):
        cr = CrawlerRunner({"foo": "bar"})
        assert cr.settings["foo"] == "bar"
        self.assertOptionIsDefault(cr.settings, "RETRY_ENABLED")

    def test_crawler_runner_accepts_None(self):
        cr = CrawlerRunner()
        self.assertOptionIsDefault(cr.settings, "RETRY_ENABLED")
class TestAsyncCrawlerRunner(TestBaseCrawler):
    def test_spider_manager_verify_interface(self):
        # Same interface validation as CrawlerRunner, for the async variant.
        bad_settings = Settings(
            {"SPIDER_LOADER_CLASS": SpiderLoaderWithWrongInterface}
        )
        with pytest.raises(MultipleInvalid):
            AsyncCrawlerRunner(bad_settings)

    def test_crawler_runner_accepts_dict(self):
        cr = AsyncCrawlerRunner({"foo": "bar"})
        assert cr.settings["foo"] == "bar"
        self.assertOptionIsDefault(cr.settings, "RETRY_ENABLED")

    def test_crawler_runner_accepts_None(self):
        cr = AsyncCrawlerRunner()
        self.assertOptionIsDefault(cr.settings, "RETRY_ENABLED")
class TestCrawlerProcess(TestBaseCrawler):
    def test_crawler_process_accepts_dict(self):
        process = CrawlerProcess({"foo": "bar"}, install_root_handler=False)
        assert process.settings["foo"] == "bar"
        self.assertOptionIsDefault(process.settings, "RETRY_ENABLED")

    def test_crawler_process_accepts_None(self):
        process = CrawlerProcess(install_root_handler=False)
        self.assertOptionIsDefault(process.settings, "RETRY_ENABLED")
@pytest.mark.only_asyncio
class TestAsyncCrawlerProcess(TestBaseCrawler):
    def test_crawler_process_accepts_dict(self, reactor_pytest: str) -> None:
        settings = {"foo": "bar", "TWISTED_ENABLED": reactor_pytest != "none"}
        process = AsyncCrawlerProcess(settings, install_root_handler=False)
        assert process.settings["foo"] == "bar"
        self.assertOptionIsDefault(process.settings, "RETRY_ENABLED")

    @pytest.mark.requires_reactor  # can't pass TWISTED_ENABLED=False
    def test_crawler_process_accepts_None(self) -> None:
        process = AsyncCrawlerProcess(install_root_handler=False)
        self.assertOptionIsDefault(process.settings, "RETRY_ENABLED")
class ExceptionSpider(scrapy.Spider):
    """Spider whose construction always fails; used to exercise error paths
    during crawl bootstrap."""

    name = "exception"

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Raised before any instance exists, so the crawl fails immediately.
        raise ValueError("Exception in from_crawler method")
class NoRequestsSpider(scrapy.Spider):
    """Spider that finishes immediately without issuing any requests."""

    name = "no_request"

    async def start(self):
        # `return` before `yield` makes this an async generator that
        # terminates without yielding anything; the unreachable `yield`
        # exists only to force generator semantics.
        return
        yield
@pytest.mark.requires_reactor
class TestCrawlerRunnerHasSpider:
    """CrawlerRunner bootstrap behavior. The _runner/_crawl hooks exist so a
    subclass can re-run the same tests against AsyncCrawlerRunner."""

    @staticmethod
    def _runner():
        return CrawlerRunner(get_reactor_settings())

    @staticmethod
    def _crawl(runner, spider):
        return runner.crawl(spider)

    @inline_callbacks_test
    def test_crawler_runner_bootstrap_successful(self):
        runner = self._runner()
        yield self._crawl(runner, NoRequestsSpider)
        assert not runner.bootstrap_failed

    @inline_callbacks_test
    def test_crawler_runner_bootstrap_successful_for_several(self):
        runner = self._runner()
        yield self._crawl(runner, NoRequestsSpider)
        yield self._crawl(runner, NoRequestsSpider)
        assert not runner.bootstrap_failed

    @inline_callbacks_test
    def test_crawler_runner_bootstrap_failed(self):
        runner = self._runner()
        # ExceptionSpider raises in from_crawler; the error must propagate
        # AND mark the runner's bootstrap as failed.
        try:
            yield self._crawl(runner, ExceptionSpider)
        except ValueError:
            pass
        else:
            pytest.fail("Exception should be raised from spider")
        assert runner.bootstrap_failed

    @inline_callbacks_test
    def test_crawler_runner_bootstrap_failed_for_several(self):
        runner = self._runner()
        try:
            yield self._crawl(runner, ExceptionSpider)
        except ValueError:
            pass
        else:
            pytest.fail("Exception should be raised from spider")
        # A later successful crawl must not clear the failed flag.
        yield self._crawl(runner, NoRequestsSpider)
        assert runner.bootstrap_failed

    @inline_callbacks_test
    def test_crawler_runner_asyncio_enabled_true(
        self, reactor_pytest: str
    ) -> Generator[Deferred[Any], Any, None]:
        # Requesting the asyncio reactor only fails when a different reactor
        # is already installed.
        if reactor_pytest != "asyncio":
            runner = CrawlerRunner(
                settings={
                    "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
                }
            )
            with pytest.raises(
                Exception,
                match=r"The installed reactor \(.*?\) does not match the requested one \(.*?\)",
            ):
                yield self._crawl(runner, NoRequestsSpider)
        else:
            CrawlerRunner(
                settings={
                    "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
                }
            )
@pytest.mark.only_asyncio
class TestAsyncCrawlerRunnerHasSpider(TestCrawlerRunnerHasSpider):
    """Re-runs the inherited runner tests against AsyncCrawlerRunner."""

    @staticmethod
    def _runner():
        return AsyncCrawlerRunner(get_reactor_settings())

    @staticmethod
    def _crawl(runner, spider):
        # AsyncCrawlerRunner.crawl returns a coroutine; adapt it to a
        # Deferred so the inherited inlineCallbacks-style tests still work.
        return deferred_from_coro(runner.crawl(spider))

    def test_crawler_runner_asyncio_enabled_true(self):
        pytest.skip("This test is only for CrawlerRunner")
class ScriptRunnerMixin(ABC):
    """Mixin for tests that run helper scripts in a subprocess and inspect
    their stderr output (where the log ends up)."""

    @property
    @abstractmethod
    def script_dir(self) -> Path:
        """Directory containing the helper scripts for this test class."""
        raise NotImplementedError

    @staticmethod
    def get_script_dir(name: str) -> Path:
        """Return the named script directory next to this test module."""
        return Path(__file__).parent.resolve() / name

    def get_script_args(self, script_name: str, *script_args: str) -> list[str]:
        """Build the argv list to run *script_name* with the current Python."""
        script_path = self.script_dir / script_name
        return [sys.executable, str(script_path), *script_args]

    def run_script(self, script_name: str, *script_args: str) -> str:
        """Run the script to completion and return its decoded stderr.

        Uses subprocess.run instead of Popen+communicate: same behavior
        (both streams captured, wait for exit), but the process and pipes
        are cleaned up even if an exception occurs.
        """
        args = self.get_script_args(script_name, *script_args)
        completed = subprocess.run(
            args,
            env=get_script_run_env(),
            capture_output=True,
            check=False,  # callers assert on log contents, not exit status
        )
        return completed.stderr.decode("utf-8")
class TestCrawlerProcessSubprocessBase(ScriptRunnerMixin):
    """Common tests between CrawlerProcess and AsyncCrawlerProcess,
    with the same file names and expectations.
    """

    def test_simple(self):
        log = self.run_script("simple.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "is_reactorless(): False" in log

    def test_multi(self):
        log = self.run_script("multi.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "ReactorAlreadyInstalledError" not in log

    def test_reactor_default(self):
        log = self.run_script("reactor_default.py")
        assert "Spider closed (finished)" not in log
        assert (
            "does not match the requested one "
            "(twisted.internet.asyncioreactor.AsyncioSelectorReactor)"
        ) in log

    def test_asyncio_enabled_no_reactor(self):
        log = self.run_script("asyncio_enabled_no_reactor.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "RuntimeError" not in log

    def test_asyncio_enabled_reactor(self):
        log = self.run_script("asyncio_enabled_reactor.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "RuntimeError" not in log

    @pytest.mark.skipif(
        parse_version(w3lib_version) >= parse_version("2.0.0"),
        reason="w3lib 2.0.0 and later do not allow invalid domains.",
    )
    def test_ipv6_default_name_resolver(self):
        log = self.run_script("default_name_resolver.py")
        assert "Spider closed (finished)" in log
        assert (
            "'downloader/exception_type_count/scrapy.exceptions.CannotResolveHostError': 1,"
            in log
        )
        assert (
            "scrapy.exceptions.CannotResolveHostError: DNS lookup failed: no results for hostname lookup: ::1."
            in log
        )

    def test_caching_hostname_resolver_ipv6(self):
        log = self.run_script("caching_hostname_resolver_ipv6.py")
        assert "Spider closed (finished)" in log
        assert "scrapy.exceptions.CannotResolveHostError" not in log

    def test_caching_hostname_resolver_finite_execution(
        self, mockserver: MockServer
    ) -> None:
        log = self.run_script("caching_hostname_resolver.py", mockserver.url("/"))
        assert "Spider closed (finished)" in log
        assert "ERROR: Error downloading" not in log
        assert "TimeoutError" not in log
        assert "scrapy.exceptions.CannotResolveHostError" not in log

    def test_twisted_reactor_asyncio(self):
        log = self.run_script("twisted_reactor_asyncio.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )

    def test_twisted_reactor_asyncio_custom_settings(self):
        log = self.run_script("twisted_reactor_custom_settings.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )

    def test_twisted_reactor_asyncio_custom_settings_same(self):
        log = self.run_script("twisted_reactor_custom_settings_same.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )

    @pytest.mark.requires_uvloop
    def test_custom_loop_asyncio(self):
        log = self.run_script("asyncio_custom_loop.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" in log

    @pytest.mark.requires_uvloop
    def test_custom_loop_asyncio_deferred_signal(self):
        log = self.run_script("asyncio_deferred_signal.py", "uvloop.Loop")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" in log
        assert "async pipeline opened!" in log

    @pytest.mark.requires_uvloop
    def test_asyncio_enabled_reactor_same_loop(self):
        log = self.run_script("asyncio_enabled_reactor_same_loop.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" in log

    @pytest.mark.requires_uvloop
    def test_asyncio_enabled_reactor_different_loop(self):
        log = self.run_script("asyncio_enabled_reactor_different_loop.py")
        assert "Spider closed (finished)" not in log
        assert (
            "does not match the one specified in the ASYNCIO_EVENT_LOOP "
            "setting (uvloop.Loop)"
        ) in log

    def test_default_loop_asyncio_deferred_signal(self):
        log = self.run_script("asyncio_deferred_signal.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" not in log
        assert "async pipeline opened!" in log

    def test_args_change_settings(self):
        log = self.run_script("args_settings.py")
        assert "Spider closed (finished)" in log
        assert "The value of FOO is 42" in log

    def test_shutdown_graceful(self):
        # SIGINT is not usable on Windows; SIGBREAK is used there instead.
        sig = signal.SIGINT if sys.platform != "win32" else signal.SIGBREAK
        args = self.get_script_args("sleeping.py", "3")
        p = PopenSpawn(args, timeout=5)
        p.expect_exact("Spider opened")
        p.expect_exact("Crawled (200)")
        p.kill(sig)
        # One signal: graceful shutdown, spider closes with "shutdown".
        p.expect_exact("shutting down gracefully")
        p.expect_exact("Spider closed (shutdown)")
        p.wait()

    @inline_callbacks_test
    def test_shutdown_forced(self):
        sig = signal.SIGINT if sys.platform != "win32" else signal.SIGBREAK
        args = self.get_script_args("sleeping.py", "10")
        p = PopenSpawn(args, timeout=5)
        p.expect_exact("Spider opened")
        p.expect_exact("Crawled (200)")
        p.kill(sig)
        p.expect_exact("shutting down gracefully")
        # sending the second signal too fast often causes problems
        d = Deferred()
        call_later(0.01, d.callback, None)
        yield d
        # Second signal: forced (unclean) shutdown.
        p.kill(sig)
        p.expect_exact("forcing unclean shutdown")
        p.wait()
class TestCrawlerProcessSubprocess(TestCrawlerProcessSubprocessBase):
    """Subprocess tests specific to CrawlerProcess; reuses the shared suite
    via the script_dir hook plus reactor-selection edge cases."""

    @property
    def script_dir(self) -> Path:
        return self.get_script_dir("CrawlerProcess")

    def test_reactor_default_twisted_reactor_select(self):
        log = self.run_script("reactor_default_twisted_reactor_select.py")
        if platform.system() in ["Windows", "Darwin"]:
            # The goal of this test function is to test that, when a reactor is
            # installed (the default one here) and a different reactor is
            # configured (select here), an error raises.
            #
            # In Windows the default reactor is the select reactor, so that
            # error does not raise.
            #
            # If that ever becomes the case on more platforms (i.e. if Linux
            # also starts using the select reactor by default in a future
            # version of Twisted), then we will need to rethink this test.
            assert "Spider closed (finished)" in log
        else:
            assert "Spider closed (finished)" not in log
            assert (
                "does not match the requested one "
                "(twisted.internet.selectreactor.SelectReactor)"
            ) in log

    def test_reactor_select(self):
        log = self.run_script("reactor_select.py")
        assert "Spider closed (finished)" not in log
        assert (
            "does not match the requested one "
            "(twisted.internet.asyncioreactor.AsyncioSelectorReactor)"
        ) in log

    def test_reactor_select_twisted_reactor_select(self):
        log = self.run_script("reactor_select_twisted_reactor_select.py")
        assert "Spider closed (finished)" in log
        assert "ReactorAlreadyInstalledError" not in log

    def test_reactor_select_subclass_twisted_reactor_select(self):
        log = self.run_script("reactor_select_subclass_twisted_reactor_select.py")
        assert "Spider closed (finished)" not in log
        assert (
            "does not match the requested one "
            "(twisted.internet.selectreactor.SelectReactor)"
        ) in log

    def test_twisted_reactor_select(self):
        log = self.run_script("twisted_reactor_select.py")
        assert "Spider closed (finished)" in log
        assert "Using reactor: twisted.internet.selectreactor.SelectReactor" in log

    @pytest.mark.skipif(
        platform.system() == "Windows", reason="PollReactor is not supported on Windows"
    )
    def test_twisted_reactor_poll(self):
        log = self.run_script("twisted_reactor_poll.py")
        assert "Spider closed (finished)" in log
        assert "Using reactor: twisted.internet.pollreactor.PollReactor" in log

    def test_twisted_reactor_asyncio_custom_settings_conflict(self):
        log = self.run_script("twisted_reactor_custom_settings_conflict.py")
        assert "Using reactor: twisted.internet.selectreactor.SelectReactor" in log
        assert (
            "(twisted.internet.selectreactor.SelectReactor) does not match the requested one"
            in log
        )

    def test_reactorless(self):
        # CrawlerProcess (unlike AsyncCrawlerProcess) rejects running
        # without Twisted entirely.
        log = self.run_script("reactorless.py")
        assert (
            "RuntimeError: CrawlerProcess doesn't support TWISTED_ENABLED=False" in log
        )
class TestAsyncCrawlerProcessSubprocess(TestCrawlerProcessSubprocessBase):
    """Subprocess-based reactor and reactorless-mode tests for
    ``AsyncCrawlerProcess``."""

    @property
    def script_dir(self) -> Path:
        # Helper scripts for these tests live in the "AsyncCrawlerProcess" directory.
        return self.get_script_dir("AsyncCrawlerProcess")

    def test_twisted_reactor_custom_settings_select(self):
        """The asyncio reactor is already in use; requesting select via
        settings must fail with a mismatch error."""
        log = self.run_script("twisted_reactor_custom_settings_select.py")
        assert "Spider closed (finished)" not in log
        assert (
            "(twisted.internet.asyncioreactor.AsyncioSelectorReactor) "
            "does not match the requested one "
            "(twisted.internet.selectreactor.SelectReactor)"
        ) in log

    @pytest.mark.requires_uvloop
    def test_asyncio_enabled_reactor_same_loop(self):
        """Custom asyncio loop (uvloop) matching ASYNCIO_EVENT_LOOP works."""
        log = self.run_script("asyncio_custom_loop_custom_settings_same.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" in log

    @pytest.mark.requires_uvloop
    def test_asyncio_enabled_reactor_different_loop(self):
        """Running loop differs from ASYNCIO_EVENT_LOOP (uvloop): crawl aborts."""
        log = self.run_script("asyncio_custom_loop_custom_settings_different.py")
        assert "Spider closed (finished)" not in log
        assert (
            "does not match the one specified in the ASYNCIO_EVENT_LOOP "
            "setting (uvloop.Loop)"
        ) in log

    def test_reactorless_simple(self):
        """TWISTED_ENABLED=False: a crawl completes without any Twisted
        reactor and without errors or warnings."""
        log = self.run_script("reactorless_simple.py")
        assert "Not using a Twisted reactor" in log
        assert "Spider closed (finished)" in log
        assert "is_reactorless(): True" in log
        assert "ERROR: " not in log
        assert "WARNING: " not in log

    def test_reactorless_datauri(self):
        """Reactorless mode can actually scrape (data: URI yields one item)."""
        log = self.run_script("reactorless_datauri.py")
        assert "Not using a Twisted reactor" in log
        assert "Spider closed (finished)" in log
        assert "{'data': 'foo'}" in log
        assert "'item_scraped_count': 1" in log
        assert "ERROR: " not in log
        assert "WARNING: " not in log

    def test_reactorless_import_hook(self):
        """In reactorless mode, importing twisted.internet.reactor is blocked
        by an import hook (per the asserted ImportError message)."""
        log = self.run_script("reactorless_import_hook.py")
        assert "Not using a Twisted reactor" in log
        assert "Spider closed (finished)" in log
        assert "ImportError: Import of twisted.internet.reactor is forbidden" in log

    def test_reactorless_telnetconsole_default(self):
        """By default TWISTED_ENABLED=False silently sets TELNETCONSOLE_ENABLED=False."""
        log = self.run_script("reactorless_telnetconsole_default.py")
        assert "Not using a Twisted reactor" in log
        assert "Spider closed (finished)" in log
        assert "The TelnetConsole extension requires a Twisted reactor" not in log
        assert "scrapy.extensions.telnet.TelnetConsole" not in log

    def test_reactorless_telnetconsole_disabled(self):
        """Explicit TELNETCONSOLE_ENABLED=False, there are no warnings."""
        log = self.run_script("reactorless_telnetconsole_disabled.py")
        assert "Not using a Twisted reactor" in log
        assert "Spider closed (finished)" in log
        assert "The TelnetConsole extension requires a Twisted reactor" not in log
        assert "scrapy.extensions.telnet.TelnetConsole" not in log

    def test_reactorless_telnetconsole_enabled(self):
        """Explicit TELNETCONSOLE_ENABLED=True, the user gets a warning."""
        log = self.run_script("reactorless_telnetconsole_enabled.py")
        assert "Not using a Twisted reactor" in log
        assert "Spider closed (finished)" in log
        assert "The TelnetConsole extension requires a Twisted reactor" in log

    def test_reactorless_reactor(self):
        """TWISTED_ENABLED=False with a reactor already installed is an error."""
        log = self.run_script("reactorless_reactor.py")
        assert (
            "RuntimeError: TWISTED_ENABLED is False but a Twisted reactor is installed"
            in log
        )
class TestCrawlerRunnerSubprocessBase(ScriptRunnerMixin):
    """Common tests between CrawlerRunner and AsyncCrawlerRunner,
    with the same file names and expectations.
    """

    def test_simple(self):
        """A single crawl finishes under the asyncio reactor."""
        log = self.run_script("simple.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "is_reactorless(): False" in log

    def test_multi_parallel(self):
        """Two crawls run concurrently: both open before either closes."""
        log = self.run_script("multi_parallel.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        # Interleaved open/open/close/close proves the crawls overlapped.
        assert re.search(
            r"Spider opened.+Spider opened.+Closing spider.+Closing spider",
            log,
            re.DOTALL,
        )

    def test_multi_seq(self):
        """Two crawls run sequentially: first closes before second opens."""
        log = self.run_script("multi_seq.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        # Open/close/open/close proves the crawls did not overlap.
        assert re.search(
            r"Spider opened.+Closing spider.+Spider opened.+Closing spider",
            log,
            re.DOTALL,
        )

    @pytest.mark.requires_uvloop
    def test_custom_loop_same(self):
        """Custom asyncio loop (uvloop) matching the settings is honored."""
        log = self.run_script("custom_loop_same.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" in log

    @pytest.mark.requires_uvloop
    def test_custom_loop_different(self):
        """Running loop differs from ASYNCIO_EVENT_LOOP (uvloop): crawl aborts."""
        log = self.run_script("custom_loop_different.py")
        assert "Spider closed (finished)" not in log
        assert (
            "does not match the one specified in the ASYNCIO_EVENT_LOOP "
            "setting (uvloop.Loop)"
        ) in log
class TestCrawlerRunnerSubprocess(TestCrawlerRunnerSubprocessBase):
    """CrawlerRunner-specific subprocess tests."""

    @property
    def script_dir(self) -> Path:
        # Helper scripts for these tests live in the "CrawlerRunner" directory.
        return self.get_script_dir("CrawlerRunner")

    def test_explicit_default_reactor(self):
        """Explicitly installing the default reactor: the crawl finishes and
        the asyncio reactor line is absent from the log."""
        log = self.run_script("explicit_default_reactor.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            not in log
        )

    def test_response_ip_address(self):
        """The response exposes the server IP address (here 127.0.0.1)."""
        log = self.run_script("ip_address.py")
        assert "INFO: Spider closed (finished)" in log
        assert "INFO: Host: not.a.real.domain" in log
        assert "INFO: Type: <class 'ipaddress.IPv4Address'>" in log
        assert "INFO: IP address: 127.0.0.1" in log

    def test_change_default_reactor(self):
        """Changing the reactor setting results in the asyncio reactor and an
        asyncio event loop being used."""
        log = self.run_script("change_reactor.py")
        assert (
            "DEBUG: Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "DEBUG: Using asyncio event loop" in log

    def test_reactorless(self):
        """CrawlerRunner refuses to run with TWISTED_ENABLED=False."""
        log = self.run_script("reactorless.py")
        assert (
            "RuntimeError: CrawlerRunner doesn't support TWISTED_ENABLED=False" in log
        )
class TestAsyncCrawlerRunnerSubprocess(TestCrawlerRunnerSubprocessBase):
    """AsyncCrawlerRunner-specific subprocess tests, including reactorless mode."""

    @property
    def script_dir(self) -> Path:
        # Helper scripts for these tests live in the "AsyncCrawlerRunner" directory.
        return self.get_script_dir("AsyncCrawlerRunner")

    def test_simple_default_reactor(self):
        """AsyncCrawlerRunner rejects the default (non-asyncio) reactor when
        TWISTED_ENABLED is True."""
        log = self.run_script("simple_default_reactor.py")
        assert "Spider closed (finished)" not in log
        assert (
            "RuntimeError: When TWISTED_ENABLED is True, "
            "AsyncCrawlerRunner requires that the installed Twisted reactor"
        ) in log

    def test_reactorless_simple(self):
        """TWISTED_ENABLED=False: a crawl completes without any Twisted
        reactor and without errors or warnings."""
        log = self.run_script("reactorless_simple.py")
        assert "Not using a Twisted reactor" in log
        assert "Spider closed (finished)" in log
        assert "is_reactorless(): True" in log
        assert "ERROR: " not in log
        assert "WARNING: " not in log

    def test_reactorless_datauri(self):
        """Reactorless mode can actually scrape (data: URI yields one item)."""
        log = self.run_script("reactorless_datauri.py")
        assert "Not using a Twisted reactor" in log
        assert "Spider closed (finished)" in log
        assert "{'data': 'foo'}" in log
        assert "'item_scraped_count': 1" in log
        assert "ERROR: " not in log
        assert "WARNING: " not in log

    def test_reactorless_reactor(self):
        """TWISTED_ENABLED=False with a reactor already installed is an error."""
        log = self.run_script("reactorless_reactor.py")
        assert (
            "RuntimeError: TWISTED_ENABLED is False but a Twisted reactor is installed"
            in log
        )
@pytest.mark.parametrize(
    ("settings", "items"),
    [
        ({}, default_settings.LOG_VERSIONS),
        ({"LOG_VERSIONS": ["itemadapter"]}, ["itemadapter"]),
        ({"LOG_VERSIONS": []}, None),
    ],
)
def test_log_scrapy_info(settings, items, caplog):
    """Creating a CrawlerProcess logs the startup line and, unless
    LOG_VERSIONS is empty, a versions report listing the configured items."""
    with caplog.at_level("INFO"):
        CrawlerProcess(settings, install_root_handler=False)
    startup_message = caplog.records[0].getMessage()
    assert startup_message == (
        f"Scrapy {scrapy.__version__} started (bot: scrapybot)"
    ), repr(caplog.records[0].msg)
    if not items:
        # An empty LOG_VERSIONS suppresses the versions report entirely.
        assert len(caplog.records) == 1
        return
    # Each item appears as "name': 'version"; version values may wrap onto
    # continuation lines, hence the ('\n +'[^']+)* part of the pattern.
    item_patterns = (f"{item}': '[^']+('\n +'[^']+)*" for item in items)
    versions_message = caplog.records[1].getMessage()
    assert re.search(
        r"^Versions:\n{'" + "',\n '".join(item_patterns) + "'}$",
        versions_message,
    )
@coroutine_test
async def test_deprecated_crawler_stop() -> None:
    """After a crawl, calling Crawler.stop() emits a deprecation warning."""
    crawler = get_crawler(DefaultSpider)
    await maybe_deferred_to_future(crawler.crawl())
    with pytest.warns(
        ScrapyDeprecationWarning, match=r"Crawler.stop\(\) is deprecated"
    ):
        await maybe_deferred_to_future(crawler.stop())
|
python
|
github
|
https://github.com/scrapy/scrapy
|
tests/test_crawler.py
|
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py.  It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.

import thread
from test.test_support import verbose, TestSkipped, TestFailed

# Protects the shared countdown N while each worker decrements it.
critical_section = thread.allocate_lock()
# Acquired by the main thread; released by the last worker to finish.
done = thread.allocate_lock()
def task():
    """Worker body: import random under contention, then decrement the
    shared countdown N; the last worker to finish releases `done`."""
    global N, critical_section, done
    import random
    # The result of randrange is unused; the call exists to exercise the
    # freshly imported module while other threads are importing it too.
    x = random.randrange(1, 3)
    critical_section.acquire()
    N -= 1
    # Must release critical_section before releasing done, else the main
    # thread can exit and set critical_section to None as part of global
    # teardown; then critical_section.release() raises AttributeError.
    finished = N == 0
    critical_section.release()
    if finished:
        done.release()
def test_import_hangers():
    """Import the threaded_import_hangers helper module and fail if it
    recorded any errors while being imported."""
    import sys
    if verbose:
        print "testing import hangers ...",
    import test.threaded_import_hangers
    try:
        if test.threaded_import_hangers.errors:
            raise TestFailed(test.threaded_import_hangers.errors)
        elif verbose:
            print "OK."
    finally:
        # In case this test is run again, make sure the helper module
        # gets loaded from scratch again.
        del sys.modules['test.threaded_import_hangers']
# Tricky: When regrtest imports this module, the thread running regrtest
# grabs the import lock and won't let go of it until this module returns.
# All other threads attempting an import hang for the duration. Since
# this test spawns threads that do little *but* import, we can't do that
# successfully until after this module finishes importing and regrtest
# regains control. To make this work, a special case was added to
# regrtest to invoke a module's "test_main" function (if any) after
# importing it.
def test_main(): # magic name! see above
    """Spawn batches of threads that all import `random` concurrently,
    waiting for each batch to drain via the `done` lock."""
    global N, done
    import imp
    if imp.lock_held():
        # This triggers on, e.g., from test import autotest.
        raise TestSkipped("can't run when import lock is held")
    done.acquire()
    # Batch sizes 20 and 50, three times each.
    for N in (20, 50) * 3:
        if verbose:
            print "Trying", N, "threads ...",
        for i in range(N):
            thread.start_new_thread(task, ())
        # Blocks until the last worker releases `done` (N reached 0).
        done.acquire()
        if verbose:
            print "OK."
    done.release()
    test_import_hangers()

if __name__ == "__main__":
    test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibRecord - XML MARC processing library for Invenio.
For API, see create_record(), record_get_field_instances() and friends
in the source code of this file in the section entitled INTERFACE.
Note: Does not access the database, the input is MARCXML only."""
### IMPORT INTERESTING MODULES AND XML PARSERS
import re
import sys
from cStringIO import StringIO
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.bibrecord_config import CFG_MARC21_DTD, \
CFG_BIBRECORD_WARNING_MSGS, CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL, \
CFG_BIBRECORD_DEFAULT_CORRECT, CFG_BIBRECORD_PARSERS_AVAILABLE, \
InvenioBibRecordParserError, InvenioBibRecordFieldError
from invenio.config import CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
from invenio.textutils import encode_for_xml
# Some values used for the RXP parsing.
TAG, ATTRS, CHILDREN = 0, 1, 2

# Find out about the best usable parser:
AVAILABLE_PARSERS = []

# Do we remove singletons (empty tags)?
# NOTE: this is currently set to True as there are some external workflow
# exploiting singletons, e.g. bibupload -c used to delete fields, and
# bibdocfile --fix-marc called on a record where the latest document
# has been deleted.
CFG_BIBRECORD_KEEP_SINGLETONS = True

# Probe each optional XML parser: it is registered only when it is both
# importable AND listed in CFG_BIBRECORD_PARSERS_AVAILABLE.
try:
    import pyRXP
    if 'pyrxp' in CFG_BIBRECORD_PARSERS_AVAILABLE:
        AVAILABLE_PARSERS.append('pyrxp')
except ImportError:
    pass
try:
    from lxml import etree
    if 'lxml' in CFG_BIBRECORD_PARSERS_AVAILABLE:
        AVAILABLE_PARSERS.append('lxml')
except ImportError:
    pass
try:
    import Ft.Xml.Domlette
    if '4suite' in CFG_BIBRECORD_PARSERS_AVAILABLE:
        AVAILABLE_PARSERS.append('4suite')
except ImportError:
    pass
except Exception, err:
    # 4suite has been seen to fail with errors other than ImportError;
    # warn but keep probing the remaining parsers.
    from warnings import warn
    warn("Error when importing 4suite: %s" % err)
    pass
try:
    import xml.dom.minidom
    import xml.parsers.expat
    if 'minidom' in CFG_BIBRECORD_PARSERS_AVAILABLE:
        AVAILABLE_PARSERS.append('minidom')
except ImportError:
    pass
### INTERFACE / VISIBLE FUNCTIONS
def create_field(subfields=None, ind1=' ', ind2=' ', controlfield_value='',
                 global_position=-1):
    """
    Build, validate and return a single field tuple
    (subfields, ind1, ind2, controlfield_value, global_position).
    Global position is set arbitrary to -1 by default."""
    if subfields is None:
        subfields = []
    washed_ind1, washed_ind2 = _wash_indicators(ind1, ind2)
    field = (subfields, washed_ind1, washed_ind2, controlfield_value,
             global_position)
    # Raises if the assembled tuple does not respect the field structure.
    _check_field_validity(field)
    return field
def create_records(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                   correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
                   keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Split the MARCXML input into individual <record> chunks and parse
    each one with create_record(). Returns one create_record() result
    tuple per record found; see that function's docstring."""
    # DOTALL so that records spanning several lines are still matched.
    record_pattern = re.compile('<record.*?>.*?</record>', re.DOTALL)
    results = []
    for record_xml in record_pattern.findall(marcxml):
        results.append(create_record(record_xml, verbose=verbose,
                                     correct=correct, parser=parser,
                                     keep_singletons=keep_singletons))
    return results
def create_record(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                  correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
                  sort_fields_by_indicators=False,
                  keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record object from the marcxml description.
    Uses the best parser available in CFG_BIBRECORD_PARSERS_AVAILABLE or
    the parser specified.
    The returned object is a tuple (record, status_code, list_of_errors),
    where status_code is 0 when there are errors, 1 when no errors.
    The return record structure is as follows:
    Record := {tag : [Field]}
    Field := (Subfields, ind1, ind2, value)
    Subfields := [(code, value)]
    For example:
                                  ______
                                 |record|
                                  ------
      __________________________|_______________________________________
     |record['001']             |record['909']        |record['520']    |
     |                          |                     |                 |
    [list of fields]      [list of fields]      [list of fields]       ...
     |                     ______|______________       |
     |[0]                 |[0]          |[1]    |      |[0]
     ___|_____       _____|___       ___|_____ ...  ____|____
    |Field 001|     |Field 909|     |Field 909|    |Field 520|
     ---------       ---------       ---------      ---------
       |             _______________|_________________        |
      ...           |[0]            |[1]      |[2]    |      ...
                    |               |         |       |
            [list of subfields]    'C'       '4'
               ___|__________________________________________
              |               |                  |
    ('a', 'value') ('b', 'value for subfield b') ('a', 'value for another a')
    @param marcxml: an XML string representation of the record to create
    @param verbose: the level of verbosity: 0 (silent), 1-2 (warnings),
        3(strict:stop when errors)
    @param correct: 1 to enable correction of marcxml syntax. Else 0.
    @return: a tuple (record, status_code, list_of_errors), where status
        code is 0 where there are errors, 1 when no errors"""
    # Select the appropriate parser.
    parser = _select_parser(parser)
    # NOTE(review): if _select_parser() ever returned a value outside these
    # four, 'rec' would stay unbound and a NameError would be raised below --
    # presumably _select_parser() guarantees one of them; confirm.
    try:
        if parser == 'pyrxp':
            rec = _create_record_rxp(marcxml, verbose, correct,
                keep_singletons=keep_singletons)
        elif parser == 'lxml':
            rec = _create_record_lxml(marcxml, verbose, correct,
                keep_singletons=keep_singletons)
        elif parser == '4suite':
            rec = _create_record_4suite(marcxml,
                keep_singletons=keep_singletons)
        elif parser == 'minidom':
            rec = _create_record_minidom(marcxml,
                keep_singletons=keep_singletons)
    except InvenioBibRecordParserError, ex1:
        # Parsing failed entirely: no record, status 0, error text.
        return (None, 0, str(ex1))

    # _create_record = {
    #     'pyrxp': _create_record_rxp,
    #     'lxml': _create_record_lxml,
    #     '4suite': _create_record_4suite,
    #     'minidom': _create_record_minidom,
    #     }

    # try:
    #     rec = _create_record[parser](marcxml, verbose)
    # except InvenioBibRecordParserError, ex1:
    #     return (None, 0, str(ex1))

    if sort_fields_by_indicators:
        _record_sort_by_indicators(rec)

    errs = []
    if correct:
        # Correct the structure of the record.
        errs = _correct_record(rec)

    # Status code is 1 (OK) exactly when no correction errors were found.
    return (rec, int(not errs), errs)
def filter_field_instances(field_instances, filter_subcode, filter_value, filter_mode = 'e'):
    """Return only the field instances containing a subfield with code
    filter_subcode whose value matches filter_value.

    Accepts as input the output of record_get_field_instances().

    Matching modes:
      'e' - exact subfield value match
      's' - filter_value is a substring of the subfield value
      'r' - filter_value is a regular expression matched against the
            start of the subfield value

    Example:
    record_filter_field(record_get_field_instances(rec, '999', '%', '%'), 'y', '2001')

    @param field_instances: output from record_get_field_instances
    @param filter_subcode: name of the subfield
    @type filter_subcode: string
    @param filter_value: value of the subfield
    @type filter_value: string
    @param filter_mode: 'e','s' or 'r'
    @return: the list of matching field instances (unknown modes match
        nothing)
    """
    matched = []
    if filter_mode == 'e':
        target = (filter_subcode, filter_value)
        matched = [instance for instance in field_instances
                   if target in instance[0]]
    elif filter_mode == 's':
        for instance in field_instances:
            if any(subfield[0] == filter_subcode and
                   filter_value in subfield[1]
                   for subfield in instance[0]):
                matched.append(instance)
    elif filter_mode == 'r':
        pattern = re.compile(filter_value)
        for instance in field_instances:
            if any(subfield[0] == filter_subcode and
                   pattern.match(subfield[1]) is not None
                   for subfield in instance[0]):
                matched.append(instance)
    return matched
def records_identical(rec1, rec2, skip_005=True, ignore_field_order=False, ignore_subfield_order=False, ignore_duplicate_subfields=False, ignore_duplicate_controlfields=False):
    """
    Return True if rec1 is identical to rec2, regardless of a difference
    in the 005 tag (i.e. the timestamp).
    """
    rec1_keys = set(rec1.keys())
    rec2_keys = set(rec2.keys())
    if skip_005:
        rec1_keys.discard("005")
        rec2_keys.discard("005")
    if rec1_keys != rec2_keys:
        return False
    for key in rec1_keys:
        if ignore_duplicate_controlfields and key.startswith('00'):
            # Controlfields: compare only the set of values (field[3]),
            # so duplicates and ordering are ignored.
            if set(field[3] for field in rec1[key]) != set(field[3] for field in rec2[key]):
                return False
            continue
        rec1_fields = rec1[key]
        rec2_fields = rec2[key]
        if len(rec1_fields) != len(rec2_fields):
            ## They already differs in length...
            return False
        if ignore_field_order:
            ## We sort the fields, first by indicators and then by anything else
            rec1_fields = sorted(rec1_fields, key=lambda elem: (elem[1], elem[2], elem[3], elem[0]))
            rec2_fields = sorted(rec2_fields, key=lambda elem: (elem[1], elem[2], elem[3], elem[0]))
        else:
            ## We sort the fields, first by indicators, then by global position and then by anything else
            rec1_fields = sorted(rec1_fields, key=lambda elem: (elem[1], elem[2], elem[4], elem[3], elem[0]))
            rec2_fields = sorted(rec2_fields, key=lambda elem: (elem[1], elem[2], elem[4], elem[3], elem[0]))
        for field1, field2 in zip(rec1_fields, rec2_fields):
            if ignore_duplicate_subfields:
                # Compare subfields as sets: duplicates and order ignored.
                if field1[1:4] != field2[1:4] or set(field1[0]) != set(field2[0]):
                    return False
            elif ignore_subfield_order:
                # Compare subfields sorted: order ignored, duplicates kept.
                if field1[1:4] != field2[1:4] or sorted(field1[0]) != sorted(field2[0]):
                    return False
            elif field1[:4] != field2[:4]:
                # Strict comparison of subfields, indicators and value
                # (global position, field[4], is deliberately excluded).
                return False
    return True
def record_get_field_instances(rec, tag="", ind1=" ", ind2=" "):
    """Return the list of field instances matching tag/ind1/ind2 in rec.

    Returns an empty list when nothing matches; when tag is the empty
    string, returns all fields (as rec.items()). The parameters (tag,
    ind1, ind2) may contain the wildcard %.

    @param rec: a record structure as returned by create_record()
    @param tag: a 3 characters long string
    @param ind1: a 1 character long string
    @param ind2: a 1 character long string
    @return: a list of field tuples (Subfields, ind1, ind2, value,
        field_position_global) where subfields is list of (code, value)"""
    if not rec:
        return []
    if not tag:
        return rec.items()
    matching = []
    ind1, ind2 = _wash_indicators(ind1, ind2)
    if '%' in tag:
        # Wildcard in the tag: scan every tag present in the record.
        for field_tag in rec:
            if not _tag_matches_pattern(field_tag, tag):
                continue
            for field in rec[field_tag]:
                if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                    matching.append(field)
    else:
        # Fully specified tag: direct dictionary lookup.
        for field in rec.get(tag, []):
            if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                matching.append(field)
    return matching
def record_add_field(rec, tag, ind1=' ', ind2=' ', controlfield_value='',
                     subfields=None, field_position_global=None, field_position_local=None):
    """
    Adds a new field into the record.
    If field_position_global or field_position_local is specified then
    this method will insert the new field at the desired position.
    Otherwise a global field position will be computed in order to
    insert the field at the best position (first we try to keep the
    order of the tags and then we insert the field at the end of the
    fields with the same tag).
    If both field_position_global and field_position_local are present,
    then field_position_local takes precedence.
    @param rec: the record data structure
    @param tag: the tag of the field to be added
    @param ind1: the first indicator
    @param ind2: the second indicator
    @param controlfield_value: the value of the controlfield
    @param subfields: the subfields (a list of tuples (code, value))
    @param field_position_global: the global field position (record wise)
    @param field_position_local: the local field position (tag wise)
    @return: the global field position of the newly inserted field or -1 if the
        operation failed
    """
    error = validate_record_field_positions_global(rec)
    if error:
        # FIXME one should write a message here
        pass

    # Clean the parameters.
    if subfields is None:
        subfields = []
    ind1, ind2 = _wash_indicators(ind1, ind2)

    # A controlfield cannot carry indicators or subfields.
    if controlfield_value and (ind1 != ' ' or ind2 != ' ' or subfields):
        return -1

    # Detect field number to be used for insertion:
    # Dictionaries for uniqueness.
    tag_field_positions_global = {}.fromkeys([field[4]
                                              for field in rec.get(tag, [])])
    all_field_positions_global = {}.fromkeys([field[4]
                                              for fields in rec.values()
                                              for field in fields])

    if field_position_global is None and field_position_local is None:
        # Let's determine the global field position of the new field.
        if tag in rec:
            # Tag already present: append after its last occurrence.
            try:
                field_position_global = max([field[4] for field in rec[tag]]) \
                    + 1
            except IndexError:
                if tag_field_positions_global:
                    field_position_global = max(tag_field_positions_global) + 1
                elif all_field_positions_global:
                    field_position_global = max(all_field_positions_global) + 1
                else:
                    field_position_global = 1
        else:
            if tag in ('FMT', 'FFT', 'BDR', 'BDM'):
                # Add the new tag to the end of the record.
                if tag_field_positions_global:
                    field_position_global = max(tag_field_positions_global) + 1
                elif all_field_positions_global:
                    field_position_global = max(all_field_positions_global) + 1
                else:
                    field_position_global = 1
            else:
                # Insert the tag in an ordered way by selecting the
                # right global field position.
                immediate_lower_tag = '000'
                for rec_tag in rec:
                    if (tag not in ('FMT', 'FFT', 'BDR', 'BDM') and
                        immediate_lower_tag < rec_tag < tag):
                        immediate_lower_tag = rec_tag

                if immediate_lower_tag == '000':
                    field_position_global = 1
                else:
                    field_position_global = rec[immediate_lower_tag][-1][4] + 1

        field_position_local = len(rec.get(tag, []))
        # Shift later fields to make room at the chosen global position.
        _shift_field_positions_global(rec, field_position_global, 1)
    elif field_position_local is not None:
        # Local position takes precedence over global (see docstring).
        if tag in rec:
            if field_position_local >= len(rec[tag]):
                field_position_global = rec[tag][-1][4] + 1
            else:
                field_position_global = rec[tag][field_position_local][4]
                _shift_field_positions_global(rec, field_position_global, 1)
        else:
            if all_field_positions_global:
                field_position_global = max(all_field_positions_global) + 1
            else:
                # Empty record.
                field_position_global = 1
    elif field_position_global is not None:
        # If the user chose an existing global field position, shift all the
        # global field positions greater than the input global field position.
        if tag not in rec:
            if all_field_positions_global:
                field_position_global = max(all_field_positions_global) + 1
            else:
                field_position_global = 1
            field_position_local = 0
        elif field_position_global < min(tag_field_positions_global):
            # Requested position is before the first field of this tag:
            # clamp to that first position.
            field_position_global = min(tag_field_positions_global)
            _shift_field_positions_global(rec, min(tag_field_positions_global),
                1)
            field_position_local = 0
        elif field_position_global > max(tag_field_positions_global):
            # Requested position is after the last field of this tag:
            # clamp to just after it.
            field_position_global = max(tag_field_positions_global) + 1
            _shift_field_positions_global(rec,
                max(tag_field_positions_global) + 1, 1)
            field_position_local = len(rec.get(tag, []))
        else:
            if field_position_global in tag_field_positions_global:
                _shift_field_positions_global(rec, field_position_global, 1)
            field_position_local = 0
            # Find the local slot now holding the shifted position.
            for position, field in enumerate(rec[tag]):
                if field[4] == field_position_global + 1:
                    field_position_local = position

    # Create the new field.
    newfield = (subfields, ind1, ind2, str(controlfield_value),
                field_position_global)
    rec.setdefault(tag, []).insert(field_position_local, newfield)

    # Return new field number:
    return field_position_global
def record_has_field(rec, tag):
    """
    Check whether at least one field with the given tag exists in the record.

    @param rec: the record data structure (dict mapping tag -> fields)
    @param tag: the tag to look for (e.g. '245')
    @return: a boolean
    """
    return tag in rec
def record_delete_field(rec, tag, ind1=' ', ind2=' ',
        field_position_global=None, field_position_local=None):
    """
    If global field position is specified, deletes the field with the
    corresponding global field position.
    If field_position_local is specified, deletes the field with the
    corresponding local field position and tag.
    Else deletes all the fields matching tag and optionally ind1 and
    ind2.
    If both field_position_global and field_position_local are present,
    then field_position_local takes precedence.
    @param rec: the record data structure
    @param tag: the tag of the field to be deleted
    @param ind1: the first indicator of the field to be deleted
    @param ind2: the second indicator of the field to be deleted
    @param field_position_global: the global field position (record wise)
    @param field_position_local: the local field position (tag wise)
    @return: the list of deleted fields
    """
    error = validate_record_field_positions_global(rec)
    if error:
        # FIXME one should write a message here.
        pass

    if tag not in rec:
        # NOTE(review): returns False here while all other exits return a
        # list ([] or the deleted fields) -- callers using "if not result"
        # are fine, but the return type is inconsistent; confirm intent.
        return False

    ind1, ind2 = _wash_indicators(ind1, ind2)

    deleted = []
    newfields = []

    if field_position_global is None and field_position_local is None:
        # Remove all fields with tag 'tag'.
        for field in rec[tag]:
            if field[1] != ind1 or field[2] != ind2:
                newfields.append(field)
            else:
                deleted.append(field)
        rec[tag] = newfields
    elif field_position_global is not None:
        # Remove the field with 'field_position_global'.
        # NOTE(review): the 'and' between the indicator checks looks like a
        # De Morgan slip -- a field whose ind1 matches but ind2 differs is
        # still deleted when the position matches.  Presumably the intent
        # was "keep unless ind1, ind2 AND position all match"; confirm
        # before changing, as callers may rely on the current behavior.
        for field in rec[tag]:
            if (field[1] != ind1 and field[2] != ind2 or
                    field[4] != field_position_global):
                newfields.append(field)
            else:
                deleted.append(field)
        rec[tag] = newfields
    elif field_position_local is not None:
        # Remove the field with 'field_position_local'.
        try:
            del rec[tag][field_position_local]
        except IndexError:
            return []

    if not rec[tag]:
        # Tag is now empty, remove it.
        del rec[tag]

    return deleted
def record_delete_fields(rec, tag, field_positions_local=None):
    """
    Delete all/some fields defined with MARC tag 'tag' from record 'rec'.

    @param rec: a record structure.
    @type rec: tuple
    @param tag: three letter field.
    @type tag: string
    @param field_positions_local: if set, it is the list of local positions
        within all the fields with the specified tag, that should be deleted.
        If not set all the fields with the specified tag will be deleted.
    @type field_positions_local: sequence
    @return: the list of deleted fields.
    @rtype: list
    @note: the record is modified in place; the tag key is dropped when no
        field with that tag remains.
    """
    if tag not in rec:
        return []
    kept = []
    removed = []
    for position, field in enumerate(rec.get(tag, [])):
        delete_this = (field_positions_local is None or
                       position in field_positions_local)
        if delete_this:
            removed.append(field)
        else:
            kept.append(field)
    if kept:
        rec[tag] = kept
    else:
        del rec[tag]
    return removed
def record_add_fields(rec, tag, fields, field_position_local=None,
                      field_position_global=None):
    """
    Adds the fields into the record at the required position. The
    position is specified by the tag and the field_position_local in
    the list of fields.

    @param rec: a record structure
    @param tag: the tag of the fields to be added
    @param fields: the list of field tuples to be added
    @param field_position_local: the field_position_local to which the
        fields will be inserted. If not specified, appends the fields to
        the tag.
    @param field_position_global: the global field position at which the
        fields will be inserted, when no local position is given.
    @return: -1 if the operation failed, or the field_position_local
        if it was successful
    """
    if field_position_local is None and field_position_global is None:
        # No position requested: simply append each field in order.
        for field in fields:
            record_add_field(rec, tag, ind1=field[1],
                             ind2=field[2], subfields=field[0],
                             controlfield_value=field[3])
    else:
        # Insert in reverse so the fields end up at the requested position
        # in their original order.  Iterate a reversed view instead of
        # calling fields.reverse() so the caller's list is not mutated
        # (the in-place reverse was an unintended side effect).
        for field in reversed(fields):
            record_add_field(rec, tag, ind1=field[1], ind2=field[2],
                             subfields=field[0], controlfield_value=field[3],
                             field_position_local=field_position_local,
                             field_position_global=field_position_global)

    return field_position_local
def record_move_fields(rec, tag, field_positions_local,
                       field_position_local=None):
    """
    Moves some fields to the position specified by
    'field_position_local'.

    @param rec: a record structure as returned by create_record()
    @param tag: the tag of the fields to be moved
    @param field_positions_local: the positions of the fields to move
    @param field_position_local: insert the fields before that
        field_position_local. If unspecified, appends the fields
    @return: the field_position_local if the operation was successful
    """
    # Detach the selected fields, then re-insert them at the target slot.
    removed_fields = record_delete_fields(
        rec, tag, field_positions_local=field_positions_local)
    return record_add_fields(
        rec, tag, removed_fields, field_position_local=field_position_local)
def record_delete_subfield(rec, tag, subfield_code, ind1=' ', ind2=' '):
    """Deletes every subfield with code subfield_code from all fields of
    the record matching tag/ind1/ind2; the record is modified in place."""
    washed_ind1, washed_ind2 = _wash_indicators(ind1, ind2)
    for field in rec.get(tag, []):
        if field[1] != washed_ind1 or field[2] != washed_ind2:
            continue
        # Rewrite the subfield list in place (slice assignment) so the
        # field tuple keeps referencing the same list object.
        field[0][:] = [subfield for subfield in field[0]
                       if subfield[0] != subfield_code]
def record_get_field(rec, tag, field_position_global=None,
                     field_position_local=None):
    """
    Return the matching field.

    Exactly one of field_position_global / field_position_local must be
    supplied.

    @return: a list of subfield tuples (subfield code, value).
    @rtype: list
    @raise InvenioBibRecordFieldError: when no/both positions are given,
        or when no field matches.
    """
    if field_position_global is None and field_position_local is None:
        raise InvenioBibRecordFieldError(
            "A field position is required to complete this operation.")
    if field_position_global is not None and field_position_local is not None:
        raise InvenioBibRecordFieldError(
            "Only one field position is required to complete this operation.")
    if field_position_global:
        # Linear scan for the field carrying this global position.
        if tag not in rec:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        for field in rec[tag]:
            if field[4] == field_position_global:
                return field
        raise InvenioBibRecordFieldError(
            "No field has the tag '%s' and the global field position '%d'."
            % (tag, field_position_global))
    # Local position: direct indexing into the tag's field list.
    try:
        return rec[tag][field_position_local]
    except KeyError:
        raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
    except IndexError:
        raise InvenioBibRecordFieldError(
            "No field has the tag '%s' and the local field position '%d'."
            % (tag, field_position_local))
def record_replace_field(rec, tag, new_field, field_position_global=None,
        field_position_local=None):
    """Replaces a field with a new field."""
    if field_position_global is None and field_position_local is None:
        raise InvenioBibRecordFieldError("A field position is required to "
            "complete this operation.")
    if field_position_global is not None and field_position_local is not None:
        raise InvenioBibRecordFieldError("Only one field position is required "
            "to complete this operation.")
    if field_position_global:
        if tag not in rec:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        # Replace every field carrying the requested global position.
        found = False
        for position, field in enumerate(rec[tag]):
            if field[4] == field_position_global:
                rec[tag][position] = new_field
                found = True
        if not found:
            raise InvenioBibRecordFieldError("No field has the tag '%s' and "
                "the global field position '%d'." %
                (tag, field_position_global))
    else:
        try:
            rec[tag][field_position_local] = new_field
        except KeyError:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        except IndexError:
            raise InvenioBibRecordFieldError("No field has the tag '%s' and "
                "the local field position '%d'." % (tag, field_position_local))
def record_get_subfields(rec, tag, field_position_global=None,
        field_position_local=None):
    """
    Returns the subfields of the matching field. Exactly one of the global
    or the local field position must be supplied (enforced by
    record_get_field).
    @return: a list of subfield tuples (subfield code, value).
    @rtype: list
    """
    matching_field = record_get_field(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    # Subfields live in slot 0 of the field tuple.
    return matching_field[0]
def record_delete_subfield_from(rec, tag, subfield_position,
        field_position_global=None, field_position_local=None):
    """Delete subfield from position specified by tag, field number and
    subfield position.

    If the deletion empties the field, the field itself is removed; if that
    empties the tag, the tag is removed from the record as well.
    @raise InvenioBibRecordFieldError: when the subfield position does not
        exist in the located field.
    """
    subfields = record_get_subfields(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    try:
        del subfields[subfield_position]
    except IndexError:
        # Render the record as text MARC only to build a readable error.
        from invenio.xmlmarc2textmarc import create_marc_record
        recordMarc = create_marc_record(rec, 0, {"text-marc": 1, "aleph-marc": 0})
        raise InvenioBibRecordFieldError("The record : %(recordCode)s does not contain the subfield "
            "'%(subfieldIndex)s' inside the field (local: '%(fieldIndexLocal)s, global: '%(fieldIndexGlobal)s' ) of tag '%(tag)s'." % \
            {"subfieldIndex" : subfield_position, \
            "fieldIndexLocal" : str(field_position_local), \
            "fieldIndexGlobal" : str(field_position_global), \
            "tag" : tag, \
            "recordCode" : recordMarc})
    if not subfields:
        # The field has no subfields left: drop it entirely.
        if field_position_global is not None:
            # NOTE(review): deletes from rec[tag] while enumerating it;
            # safe only if a single field carries this global position —
            # confirm that invariant holds for all callers.
            for position, field in enumerate(rec[tag]):
                if field[4] == field_position_global:
                    del rec[tag][position]
        else:
            del rec[tag][field_position_local]
        if not rec[tag]:
            del rec[tag]
def record_add_subfield_into(rec, tag, subfield_code, value,
        subfield_position=None, field_position_global=None,
        field_position_local=None):
    """Add subfield into the field located by tag and field position;
    appended at the end unless 'subfield_position' is given."""
    subfields = record_get_subfields(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    new_subfield = (subfield_code, value)
    if subfield_position is None:
        subfields.append(new_subfield)
    else:
        subfields.insert(subfield_position, new_subfield)
def record_modify_controlfield(rec, tag, controlfield_value,
        field_position_global=None, field_position_local=None):
    """Modify controlfield at position specified by tag and field number."""
    old_field = record_get_field(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    # Keep subfields, indicators and global position; swap the value (slot 3).
    updated_field = (old_field[0], old_field[1], old_field[2],
                     controlfield_value, old_field[4])
    record_replace_field(rec, tag, updated_field,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
def record_modify_subfield(rec, tag, subfield_code, value, subfield_position,
        field_position_global=None, field_position_local=None):
    """Modify subfield at position specified by tag, field number and
    subfield position."""
    subfields = record_get_subfields(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    replacement = (subfield_code, value)
    # EAFP: also accepts negative subfield positions, like list indexing.
    try:
        subfields[subfield_position] = replacement
    except IndexError:
        raise InvenioBibRecordFieldError("There is no subfield with position "
            "'%d'." % subfield_position)
def record_move_subfield(rec, tag, subfield_position, new_subfield_position,
        field_position_global=None, field_position_local=None):
    """Move subfield at position specified by tag, field number and
    subfield position to new subfield position."""
    subfields = record_get_subfields(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    try:
        moved = subfields.pop(subfield_position)
    except IndexError:
        raise InvenioBibRecordFieldError("There is no subfield with position "
            "'%d'." % subfield_position)
    # list.insert never raises IndexError, so it can live outside the try.
    subfields.insert(new_subfield_position, moved)
def record_get_field_value(rec, tag, ind1=" ", ind2=" ", code=""):
    """Returns first (string) value that matches specified field
    (tag, ind1, ind2, code) of the record (rec).
    Returns empty string if not found.
    Parameters (tag, ind1, ind2, code) can contain wildcard %.
    Difference between wildcard % and empty '':
    - Empty char specifies that we are not interested in a field which
    has one of the indicator(s)/subfield specified.
    - Wildcard specifies that we are interested in getting the value
    of the field whatever the indicator(s)/subfield is.
    For e.g. consider the following record in MARC:
    100C5  $$a val1
    555AB  $$a val2
    555AB      val3
    555    $$a val4
    555A       val5
    >> record_get_field_value(record, '555', 'A', '', '')
    >> "val5"
    >> record_get_field_value(record, '555', 'A', '%', '')
    >> "val3"
    >> record_get_field_value(record, '555', 'A', '%', '%')
    >> "val2"
    >> record_get_field_value(record, '555', 'A', 'B', '')
    >> "val3"
    >> record_get_field_value(record, '555', '', 'B', 'a')
    >> ""
    >> record_get_field_value(record, '555', '', '', 'a')
    >> "val4"
    >> record_get_field_value(record, '555', '', '', '')
    >> ""
    >> record_get_field_value(record, '%%%', '%', '%', '%')
    >> "val1"
    @param rec: a record structure as returned by create_record()
    @param tag: a 3 characters long string
    @param ind1: a 1 character long string
    @param ind2: a 1 character long string
    @param code: a 1 character long string
    @return: string value (empty if nothing found)"""
    # Note: the code is quite redundant for speed reasons (avoid calling
    # functions or doing tests inside loops)
    ind1, ind2 = _wash_indicators(ind1, ind2)
    if '%' in tag:
        # Wild card in tag. Must find all corresponding fields
        if code == '':
            # Code not specified.
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                            # Return matching field value if not empty
                            if field[3]:
                                return field[3]
        elif code == '%':
            # Code is wildcard. Take first subfield of first matching field
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if (ind1 in ('%', field[1]) and ind2 in ('%', field[2])
                            and field[0]):
                            # field[0][0][1] is the value of the first subfield.
                            return field[0][0][1]
        else:
            # Code is specified. Take corresponding one
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                            for subfield in field[0]:
                                if subfield[0] == code:
                                    return subfield[1]
    else:
        # Tag is completely specified. Use tag as dict key
        if tag in rec:
            if code == '':
                # Code not specified.
                for field in rec[tag]:
                    if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                        # Return matching field value if not empty
                        # or return "" empty if not exist.
                        if field[3]:
                            return field[3]
            elif code == '%':
                # Code is wildcard. Take first subfield of first matching field
                for field in rec[tag]:
                    if (ind1 in ('%', field[1]) and ind2 in ('%', field[2]) and
                        field[0]):
                        return field[0][0][1]
            else:
                # Code is specified. Take corresponding one
                for field in rec[tag]:
                    if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                        for subfield in field[0]:
                            if subfield[0] == code:
                                return subfield[1]
    # Nothing was found
    return ""
def record_get_field_values(rec, tag, ind1=" ", ind2=" ", code="",
                            filter_subfield_code="",
                            filter_subfield_value="",
                            filter_subfield_mode="e"):
    """Returns the list of (string) values for the specified field
    (tag, ind1, ind2, code) of the record (rec).
    List can be filtered. Use filter_subfield_code
    and filter_subfield_value to search
    only in fields that have these values inside them as a subfield.
    filter_subfield_mode can have 3 different values:
    'e' for exact search
    's' for substring search
    'r' for regexp search
    Returns empty list if nothing was found.
    Parameters (tag, ind1, ind2, code) can contain wildcard %.
    @param rec: a record structure as returned by create_record()
    @param tag: a 3 characters long string
    @param ind1: a 1 character long string
    @param ind2: a 1 character long string
    @param code: a 1 character long string
    @return: a list of strings"""
    tmp = []
    ind1, ind2 = _wash_indicators(ind1, ind2)
    # reg_exp is only defined when mode is 'r' with a filter code; the
    # branches below reference it under the same guard.
    if filter_subfield_code and filter_subfield_mode == "r":
        reg_exp = re.compile(filter_subfield_value)
    tags = []
    if '%' in tag:
        # Wild card in tag. Must find all corresponding tags and fields
        tags = [k for k in rec if _tag_matches_pattern(k, tag)]
    elif rec and tag in rec:
        tags = [tag]
    # NOTE: the loops below rebind the 'tag' parameter as the loop variable;
    # the original argument is no longer needed at that point.
    if code == '':
        # Code not specified. Consider field value (without subfields)
        for tag in tags:
            for field in rec[tag]:
                if (ind1 in ('%', field[1]) and ind2 in ('%', field[2]) and
                    field[3]):
                    tmp.append(field[3])
    elif code == '%':
        # Code is wildcard. Consider all subfields
        for tag in tags:
            for field in rec[tag]:
                if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                    if filter_subfield_code:
                        if filter_subfield_mode == "e":
                            subfield_to_match = (filter_subfield_code, filter_subfield_value)
                            if subfield_to_match in field[0]:
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                        elif filter_subfield_mode == "s":
                            if (dict(field[0]).get(filter_subfield_code, '')).find(filter_subfield_value) > -1:
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                        elif filter_subfield_mode == "r":
                            if reg_exp.match(dict(field[0]).get(filter_subfield_code, '')):
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                    else:
                        for subfield in field[0]:
                            tmp.append(subfield[1])
    else:
        # Code is specified. Consider all corresponding subfields
        for tag in tags:
            for field in rec[tag]:
                if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                    if filter_subfield_code:
                        if filter_subfield_mode == "e":
                            subfield_to_match = (filter_subfield_code, filter_subfield_value)
                            if subfield_to_match in field[0]:
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                        elif filter_subfield_mode == "s":
                            if (dict(field[0]).get(filter_subfield_code, '')).find(filter_subfield_value) > -1:
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                        elif filter_subfield_mode == "r":
                            if reg_exp.match(dict(field[0]).get(filter_subfield_code, '')):
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                    else:
                        for subfield in field[0]:
                            if subfield[0] == code:
                                tmp.append(subfield[1])
    # If tmp was not set, nothing was found
    return tmp
def record_xml_output(rec, tags=None, order_fn=None):
    """Generates the XML for record 'rec' and returns it as a string
    @rec: record
    @tags: list of tags to be printed"""
    if tags is None:
        tags = []
    if isinstance(tags, str):
        tags = [tags]
    if tags and '001' not in tags:
        # Add the missing controlfield.
        tags.append('001')
    # Collect (tag, field) pairs for every requested tag.
    pairs = []
    if rec is not None:
        for cur_tag in rec:
            if tags and cur_tag not in tags:
                continue
            for field in rec[cur_tag]:
                pairs.append((cur_tag, field))
    # Sort the pairs with the default or the caller-supplied ordering.
    if order_fn is None:
        record_order_fields(pairs)
    else:
        record_order_fields(pairs, order_fn)
    marcxml = ['<record>']
    for cur_tag, field in pairs:
        marcxml.append(field_xml_output(field, cur_tag))
    marcxml.append('</record>')
    return '\n'.join(marcxml)
def field_get_subfield_instances(field):
    """Returns the list of (code, value) subfield tuples stored in 'field'."""
    subfields = field[0]
    return subfields
def field_get_subfield_values(field_instance, code):
    """Return subfield CODE values of the field instance FIELD."""
    values = []
    for subfield_code, subfield_value in field_instance[0]:
        if subfield_code == code:
            values.append(subfield_value)
    return values
def field_get_subfield_codes(field_instance):
    """Return subfield codes of the field instance FIELD."""
    codes = []
    for subfield in field_instance[0]:
        codes.append(subfield[0])
    return codes
def field_add_subfield(field, code, value):
    """Appends the subfield (code, value) to field 'field' in place."""
    subfields = field[0]
    subfields.append((code, value))
def record_order_fields(rec, fun="_order_by_ord"):
    """Orders the (tag, field) list 'rec' in place according to a comparator.

    @param rec: list of (tag, field) pairs to sort in place
    @param fun: either a comparison function, or the name (string) of one
        defined at module level (default "_order_by_ord")
    """
    if callable(fun):
        comparator = fun
    else:
        # Resolve the name in the module namespace instead of eval()ing
        # arbitrary text: same result for the documented string arguments,
        # but no code-injection surface.
        comparator = globals()[fun]
    rec.sort(comparator)
def field_xml_output(field, tag):
    """Generates the XML for field 'field' and returns it as a string."""
    if field[3]:
        # Controlfield: the value lives directly in the element, no
        # indicators or subfields.
        return ' <controlfield tag="%s">%s</controlfield>' % (
            tag, encode_for_xml(field[3]))
    lines = [' <datafield tag="%s" ind1="%s" ind2="%s">' %
             (tag, field[1], field[2])]
    for subfield in field[0]:
        lines.append(_subfield_xml_output(subfield))
    lines.append(' </datafield>')
    return '\n'.join(lines)
def record_extract_oai_id(record):
    """Returns the OAI ID of the record, or "" when none is present."""
    # The configured tag string packs tag/ind1/ind2/subfield-code together.
    oai_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
    values = record_get_field_values(record, oai_tag[0:3], oai_tag[3],
                                     oai_tag[4], oai_tag[5])
    oai_id_regex = re.compile("oai[a-zA-Z0-9/.:]+")
    for candidate in values:
        candidate = candidate.strip()
        if oai_id_regex.match(candidate):
            return candidate
    return ""
def record_extract_dois(record):
    """Returns the DOI(s) of the record as a list of strings."""
    record_dois = []
    # DOIs live in 024 7_ with $2 naming the scheme and $a the value.
    identifier_fields = record_get_field_instances(record, "024", "7", "_")
    for identifier_field in identifier_fields:
        sources = field_get_subfield_values(identifier_field, "2")
        if 'doi' in [source.lower() for source in sources]:
            record_dois.extend(
                field_get_subfield_values(identifier_field, "a"))
    return record_dois
def print_rec(rec, format=1, tags=None):
    """
    prints a record
    format = 1 -- XML
    format = 2 -- HTML (not implemented)
    @param tags: list of tags to be printed
    """
    if tags is None:
        tags = []
    if format != 1:
        # Only XML output is implemented.
        return ''
    return record_xml_output(rec, tags)
def print_recs(listofrec, format=1, tags=None):
    """
    prints a list of records
    @param format: 1 XML, 2 HTML (not implemented)
    @param tags: list of tags to be printed
    if 'listofrec' is not a list it returns empty string
    """
    if tags is None:
        tags = []
    # isinstance instead of comparing type names: also accepts list
    # subclasses and is the idiomatic type check.
    if not isinstance(listofrec, list):
        return ""
    text = ""
    for rec in listofrec:
        text = "%s\n%s" % (text, print_rec(rec, format, tags))
    return text
def concat(alist):
    """Concats a list of lists"""
    return [item for sublist in alist for item in sublist]
def record_find_field(rec, tag, field, strict=False):
    """
    Returns the global and local positions of the first occurrence
    of the field in a record.
    @param rec:    A record dictionary structure
    @type  rec:    dictionary
    @param tag:    The tag of the field to search for
    @type  tag:    string
    @param field:  A field tuple as returned by create_field()
    @type  field:  tuple
    @param strict: A boolean describing the search method. If strict
                   is False, then the order of the subfields doesn't
                   matter. Default search method is strict.
    @type  strict: boolean
    @return: A tuple of (global_position, local_position) or a
             tuple (None, None) if the field is not present.
    @rtype:  tuple
    @raise InvenioBibRecordFieldError: If the provided field is invalid.
    """
    # Validate up front; the original wrapped this in a no-op
    # "except: raise" which changed nothing — errors simply propagate.
    _check_field_validity(field)
    for local_position, candidate in enumerate(rec.get(tag, [])):
        if _compare_fields(field, candidate, strict):
            # Slot 4 of the field tuple is its global position.
            return (candidate[4], local_position)
    return (None, None)
def record_strip_empty_volatile_subfields(rec):
    """
    Removes unchanged volatile subfields from the record
    """
    for tag in rec.keys():
        for field in rec[tag]:
            # Keep only subfields whose value no longer carries the
            # "VOLATILE:" placeholder prefix; rewrite the list in place.
            field[0][:] = [subfield for subfield in field[0]
                           if not subfield[1].startswith("VOLATILE:")]
def record_strip_empty_fields(rec, tag=None):
    """
    Removes empty subfields and fields from the record. If 'tag' is not None, only
    a specific tag of the record will be stripped, otherwise the whole record.
    @param rec: A record dictionary structure
    @type rec: dictionary
    @param tag: The tag of the field to strip empty fields from
    @type tag: string
    """
    # Check whole record
    if tag is None:
        # NOTE(review): the recursion below may delete keys from 'rec'
        # while this loop iterates rec.keys(); this relies on Python 2
        # keys() returning a list snapshot — confirm before porting.
        tags = rec.keys()
        for tag in tags:
            record_strip_empty_fields(rec, tag)
    # Check specific tag of the record
    elif tag in rec:
        # in case of a controlfield
        if tag[:2] == '00':
            if len(rec[tag]) == 0 or not rec[tag][0][3]:
                del rec[tag]
        #in case of a normal field
        else:
            fields = []
            for field in rec[tag]:
                subfields = []
                for subfield in field[0]:
                    # check if the subfield has been given a value
                    if subfield[1]:
                        subfield = (subfield[0], subfield[1].strip()) # Always strip values
                        subfields.append(subfield)
                # Keep the field only if at least one subfield survived.
                if len(subfields) > 0:
                    new_field = create_field(subfields, field[1], field[2],
                        field[3])
                    fields.append(new_field)
            if len(fields) > 0:
                rec[tag] = fields
            else:
                del rec[tag]
def record_strip_controlfields(rec):
    """
    Removes all non-empty controlfields from the record
    @param rec: A record dictionary structure
    @type rec: dictionary
    """
    # Iterate over a snapshot of the keys: entries are deleted while
    # looping, which would break dict iteration on Python 3 (and is
    # fragile even where keys() returns a list).
    for tag in list(rec.keys()):
        # Controlfield tags start with '00'; slot 3 of the first field
        # holds the controlfield value.
        if tag[:2] == '00' and rec[tag][0][3]:
            del rec[tag]
def record_order_subfields(rec, tag=None):
    """ Orders subfields from a record alphabetically based on subfield code.
    If 'tag' is not None, only a specific tag of the record will be reordered,
    otherwise the whole record.
    @param rec: bibrecord
    @type rec: bibrec
    @param tag: tag where the subfields will be ordered
    @type tag: string
    """
    if rec is None:
        return rec
    if tag is None:
        # Snapshot the keys; the recursion only rewrites values, but a
        # snapshot keeps this safe on both Python 2 and 3.
        for tag in list(rec.keys()):
            record_order_subfields(rec, tag)
    elif tag in rec:
        # enumerate instead of the Python-2-only xrange: identical
        # behavior, portable to Python 3.
        for position, field in enumerate(rec[tag]):
            # Order subfields alphabetically by subfield code.
            ordered_subfields = sorted(field[0],
                                       key=lambda subfield: subfield[0])
            rec[tag][position] = (ordered_subfields, field[1], field[2],
                                  field[3], field[4])
def record_empty(rec):
    """Return True if the record has no fields besides '001' and '005'.

    @param rec: a record dictionary structure
    @return: boolean
    """
    # Plain iteration over the dict replaces the Python-2-only
    # iterkeys(); behavior is identical.
    for key in rec:
        if key not in ('001', '005'):
            return False
    return True
### IMPLEMENTATION / INVISIBLE FUNCTIONS
def _compare_fields(field1, field2, strict=True):
"""
Compares 2 fields. If strict is True, then the order of the
subfield will be taken care of, if not then the order of the
subfields doesn't matter.
@return: True if the field are equivalent, False otherwise.
"""
if strict:
# Return a simple equal test on the field minus the position.
return field1[:4] == field2[:4]
else:
if field1[1:4] != field2[1:4]:
# Different indicators or controlfield value.
return False
else:
# Compare subfields in a loose way.
return set(field1[0]) == set(field2[0])
def _check_field_validity(field):
"""
Checks if a field is well-formed.
@param field: A field tuple as returned by create_field()
@type field: tuple
@raise InvenioBibRecordFieldError: If the field is invalid.
"""
if type(field) not in (list, tuple):
raise InvenioBibRecordFieldError("Field of type '%s' should be either "
"a list or a tuple." % type(field))
if len(field) != 5:
raise InvenioBibRecordFieldError("Field of length '%d' should have 5 "
"elements." % len(field))
if type(field[0]) not in (list, tuple):
raise InvenioBibRecordFieldError("Subfields of type '%s' should be "
"either a list or a tuple." % type(field[0]))
if type(field[1]) is not str:
raise InvenioBibRecordFieldError("Indicator 1 of type '%s' should be "
"a string." % type(field[1]))
if type(field[2]) is not str:
raise InvenioBibRecordFieldError("Indicator 2 of type '%s' should be "
"a string." % type(field[2]))
if type(field[3]) is not str:
raise InvenioBibRecordFieldError("Controlfield value of type '%s' "
"should be a string." % type(field[3]))
if type(field[4]) is not int:
raise InvenioBibRecordFieldError("Global position of type '%s' should "
"be an int." % type(field[4]))
for subfield in field[0]:
if (type(subfield) not in (list, tuple) or
len(subfield) != 2 or
type(subfield[0]) is not str or
type(subfield[1]) is not str):
raise InvenioBibRecordFieldError("Subfields are malformed. "
"Should a list of tuples of 2 strings.")
def _shift_field_positions_global(record, start, delta=1):
"""Shifts all global field positions with global field positions
higher or equal to 'start' from the value 'delta'."""
if not delta:
return
for tag, fields in record.items():
newfields = []
for field in fields:
if field[4] < start:
newfields.append(field)
else:
# Increment the global field position by delta.
newfields.append(tuple(list(field[:4]) + [field[4] + delta]))
record[tag] = newfields
def _tag_matches_pattern(tag, pattern):
"""Returns true if MARC 'tag' matches a 'pattern'.
'pattern' is plain text, with % as wildcard
Both parameters must be 3 characters long strings.
For e.g.
>> _tag_matches_pattern("909", "909") -> True
>> _tag_matches_pattern("909", "9%9") -> True
>> _tag_matches_pattern("909", "9%8") -> False
@param tag: a 3 characters long string
@param pattern: a 3 characters long string
@return: False or True"""
for char1, char2 in zip(tag, pattern):
if char2 not in ('%', char1):
return False
return True
def validate_record_field_positions_global(record):
    """
    Checks if the global field positions in the record are valid ie no
    duplicate global field positions and local field positions in the
    list of fields are ascending.
    @param record: the record data structure
    @return: the first error found as a string or None if no error was found
    """
    # BUG FIX: the original never added positions to its tracking list,
    # so the duplicate check could never fire. A set also makes the
    # membership test O(1).
    seen_positions = set()
    for tag, fields in record.items():
        previous_field_position_global = -1
        for field in fields:
            if field[4] < previous_field_position_global:
                return "Non ascending global field positions in tag '%s'." % tag
            previous_field_position_global = field[4]
            if field[4] in seen_positions:
                return ("Duplicate global field position '%d' in tag '%s'" %
                    (field[4], tag))
            seen_positions.add(field[4])
def _record_sort_by_indicators(record):
    """Sorts the fields of every tag in the record by their indicators."""
    for tag in record.keys():
        record[tag] = _fields_sort_by_indicators(record[tag])
def _fields_sort_by_indicators(fields):
"""Sorts a set of fields by their indicators. Returns a sorted list
with correct global field positions."""
field_dict = {}
field_positions_global = []
for field in fields:
field_dict.setdefault(field[1:3], []).append(field)
field_positions_global.append(field[4])
indicators = field_dict.keys()
indicators.sort()
field_list = []
for indicator in indicators:
for field in field_dict[indicator]:
field_list.append(field[:4] + (field_positions_global.pop(0),))
return field_list
def _select_parser(parser=None):
    """Selects the more relevant parser based on the parsers available
    and on the parser desired by the user."""
    if not AVAILABLE_PARSERS:
        # No parser is available. This is bad.
        return None
    if parser is not None and parser in AVAILABLE_PARSERS:
        # The requested parser is installed: honour the choice.
        return parser
    # Fall back to the best available parser.
    return AVAILABLE_PARSERS[0]
def _create_record_lxml(marcxml,
        verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
        correct=CFG_BIBRECORD_DEFAULT_CORRECT,
        keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record object using the LXML parser.
    If correct == 1, then perform DTD validation
    If correct == 0, then do not perform DTD validation
    If verbose == 0, the parser will not give warnings.
    If 1 <= verbose <= 3, the parser will not give errors, but will warn
    the user about possible mistakes (implement me!)
    If verbose > 3 then the parser will be strict and will stop in case of
    well-formedness errors or DTD errors."""
    parser = etree.XMLParser(dtd_validation = correct,
                             recover = (verbose <= 3))
    if correct:
        # Wrap the input in a <collection> with the MARC21 DTD so lxml
        # can validate it.
        marcxml = '<?xml version="1.0" encoding="UTF-8"?>\n' \
                  '<!DOCTYPE collection SYSTEM "file://%s">\n' \
                  '<collection>\n%s\n</collection>' % (CFG_MARC21_DTD, marcxml)
    try:
        tree = etree.parse(StringIO(marcxml), parser)
        # parser errors are located in parser.error_log
        # if 1 <= verbose <=3 then show them to the user?
        # if verbose == 0 then continue
        # if verbose >3 then an exception will be thrown
    except Exception, e:
        raise InvenioBibRecordParserError(str(e))
    record = {}
    field_position_global = 0
    # First pass: controlfields (no indicators, no subfields).
    controlfield_iterator = tree.iter(tag='controlfield')
    for controlfield in controlfield_iterator:
        tag = controlfield.attrib.get('tag', '!').encode("UTF-8")
        ind1 = ' '
        ind2 = ' '
        text = controlfield.text
        if text is None:
            text = ''
        else:
            text = text.encode("UTF-8")
        subfields = []
        if text or keep_singletons:
            field_position_global += 1
            record.setdefault(tag, []).append((subfields, ind1, ind2, text, field_position_global))
    # Second pass: datafields with indicators and subfields.
    datafield_iterator = tree.iter(tag='datafield')
    for datafield in datafield_iterator:
        tag = datafield.attrib.get('tag', '!').encode("UTF-8")
        ind1 = datafield.attrib.get('ind1', '!').encode("UTF-8")
        ind2 = datafield.attrib.get('ind2', '!').encode("UTF-8")
        #ind1, ind2 = _wash_indicators(ind1, ind2)
        # Inlined indicator washing: '' and '_' both mean a blank indicator.
        if ind1 in ('', '_'): ind1 = ' '
        if ind2 in ('', '_'): ind2 = ' '
        subfields = []
        subfield_iterator = datafield.iter(tag='subfield')
        for subfield in subfield_iterator:
            code = subfield.attrib.get('code', '!').encode("UTF-8")
            text = subfield.text
            if text is None:
                text = ''
            else:
                text = text.encode("UTF-8")
            if text or keep_singletons:
                subfields.append((code, text))
        if subfields or keep_singletons:
            text = ''
            field_position_global += 1
            record.setdefault(tag, []).append((subfields, ind1, ind2, text, field_position_global))
    return record
def _create_record_rxp(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
        correct=CFG_BIBRECORD_DEFAULT_CORRECT,
        keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record object using the RXP parser.
    If verbose>3 then the parser will be strict and will stop in case of
    well-formedness errors or DTD errors.
    If verbose=0, the parser will not give warnings.
    If 0 < verbose <= 3, the parser will not give errors, but will warn
    the user about possible mistakes
    correct != 0 -> We will try to correct errors such as missing
    attributes
    correct = 0 -> there will not be any attempt to correct errors"""
    if correct:
        # Note that with pyRXP < 1.13 a memory leak has been found
        # involving DTD parsing. So enable correction only if you have
        # pyRXP 1.13 or greater.
        marcxml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
            '<!DOCTYPE collection SYSTEM "file://%s">\n'
            '<collection>\n%s\n</collection>' % (CFG_MARC21_DTD, marcxml))
    # Create the pyRXP parser.
    pyrxp_parser = pyRXP.Parser(ErrorOnValidityErrors=0, ProcessDTD=1,
        ErrorOnUnquotedAttributeValues=0, srcName='string input')
    if verbose > 3:
        # Strict mode: abort on validity / quoting problems.
        pyrxp_parser.ErrorOnValidityErrors = 1
        pyrxp_parser.ErrorOnUnquotedAttributeValues = 1
    try:
        root = pyrxp_parser.parse(marcxml)
    except pyRXP.error, ex1:
        raise InvenioBibRecordParserError(str(ex1))
    # If record is enclosed in a collection tag, extract it.
    if root[TAG] == 'collection':
        children = _get_children_by_tag_name_rxp(root, 'record')
        if not children:
            return {}
        root = children[0]
    record = {}
    # This is needed because of the record_xml_output function, where we
    # need to know the order of the fields.
    field_position_global = 1
    # Consider the control fields.
    for controlfield in _get_children_by_tag_name_rxp(root, 'controlfield'):
        if controlfield[CHILDREN]:
            value = ''.join([n for n in controlfield[CHILDREN]])
            # Construct the field tuple.
            field = ([], ' ', ' ', value, field_position_global)
            record.setdefault(controlfield[ATTRS]['tag'], []).append(field)
            field_position_global += 1
        elif keep_singletons:
            # Empty controlfield, preserved only when singletons are kept.
            field = ([], ' ', ' ', '', field_position_global)
            record.setdefault(controlfield[ATTRS]['tag'], []).append(field)
            field_position_global += 1
    # Consider the data fields.
    for datafield in _get_children_by_tag_name_rxp(root, 'datafield'):
        subfields = []
        for subfield in _get_children_by_tag_name_rxp(datafield, 'subfield'):
            if subfield[CHILDREN]:
                value = _get_children_as_string_rxp(subfield[CHILDREN])
                subfields.append((subfield[ATTRS].get('code', '!'), value))
            elif keep_singletons:
                subfields.append((subfield[ATTRS].get('code', '!'), ''))
        if subfields or keep_singletons:
            # Create the field.
            tag = datafield[ATTRS].get('tag', '!')
            ind1 = datafield[ATTRS].get('ind1', '!')
            ind2 = datafield[ATTRS].get('ind2', '!')
            ind1, ind2 = _wash_indicators(ind1, ind2)
            # Construct the field tuple.
            field = (subfields, ind1, ind2, '', field_position_global)
            record.setdefault(tag, []).append(field)
            field_position_global += 1
    return record
def _create_record_from_document(document,
        keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record from the document (of type
    xml.dom.minidom.Document or Ft.Xml.Domlette.Document)."""
    # Find the first element node: the document root.
    root = None
    for node in document.childNodes:
        if node.nodeType == node.ELEMENT_NODE:
            root = node
            break
    if root is None:
        return {}
    # If the record is wrapped in a <collection>, descend into the first
    # <record> child.
    if root.tagName == 'collection':
        children = _get_children_by_tag_name(root, 'record')
        if not children:
            return {}
        root = children[0]
    field_position_global = 1
    record = {}
    # Controlfields: the value is the concatenation of the text children.
    for controlfield in _get_children_by_tag_name(root, "controlfield"):
        tag = controlfield.getAttributeNS(None, "tag").encode('utf-8')
        text_nodes = controlfield.childNodes
        value = ''.join([n.data for n in text_nodes]).encode("utf-8")
        if value or keep_singletons:
            field = ([], " ", " ", value, field_position_global)
            record.setdefault(tag, []).append(field)
            field_position_global += 1
    # Datafields: gather (code, value) subfields, wash the indicators.
    for datafield in _get_children_by_tag_name(root, "datafield"):
        subfields = []
        for subfield in _get_children_by_tag_name(datafield, "subfield"):
            value = _get_children_as_string(subfield.childNodes).encode("utf-8")
            if value or keep_singletons:
                code = subfield.getAttributeNS(None, 'code').encode("utf-8")
                # '!' marks a missing code attribute.
                subfields.append((code or '!', value))
        if subfields or keep_singletons:
            tag = datafield.getAttributeNS(None, "tag").encode("utf-8") or '!'
            ind1 = datafield.getAttributeNS(None, "ind1").encode("utf-8")
            ind2 = datafield.getAttributeNS(None, "ind2").encode("utf-8")
            ind1, ind2 = _wash_indicators(ind1, ind2)
            field = (subfields, ind1, ind2, "", field_position_global)
            record.setdefault(tag, []).append(field)
            field_position_global += 1
    return record
def _create_record_minidom(marcxml,
        keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record using minidom."""
    try:
        dom = xml.dom.minidom.parseString(marcxml)
    except xml.parsers.expat.ExpatError, ex1:
        # Re-raise parse failures as the module's own parser error.
        raise InvenioBibRecordParserError(str(ex1))
    return _create_record_from_document(dom, keep_singletons=keep_singletons)
def _create_record_4suite(marcxml,
        keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record using the 4suite parser."""
    try:
        # "urn:dummy" is only a base URI required by the 4suite API.
        dom = Ft.Xml.Domlette.NonvalidatingReader.parseString(marcxml,
            "urn:dummy")
    except Ft.Xml.ReaderException, ex1:
        # Re-raise parse failures as the module's own parser error.
        raise InvenioBibRecordParserError(ex1.message)
    return _create_record_from_document(dom, keep_singletons=keep_singletons)
def _concat(alist):
"""Concats a list of lists"""
return [element for single_list in alist for element in single_list]
def _subfield_xml_output(subfield):
    """Generates the XML for a subfield object and return it as a string"""
    code, value = subfield
    return '    <subfield code="%s">%s</subfield>' % (code,
        encode_for_xml(value))
def _order_by_ord(field1, field2):
"""Function used to order the fields according to their ord value"""
return cmp(field1[1][4], field2[1][4])
def _order_by_tags(field1, field2):
"""Function used to order the fields according to the tags"""
return cmp(field1[0], field2[0])
def _get_children_by_tag_name(node, name):
"""Retrieves all children from node 'node' with name 'name' and
returns them as a list."""
try:
return [child for child in node.childNodes if child.nodeName == name]
except TypeError:
return []
def _get_children_by_tag_name_rxp(node, name):
    """RXP variant: return the children of an RXP node tuple whose tag is *name*.

    *node* is a tuple as produced by the RXP parser; an empty list is
    returned when the children slot is not iterable.
    """
    try:
        result = []
        for kid in node[CHILDREN]:
            if kid[TAG] == name:
                result.append(kid)
        return result
    except TypeError:
        return []
def _get_children_as_string(node):
"""
Iterates through all the children of a node and returns one string
containing the values from all the text-nodes recursively.
"""
out = []
if node:
for child in node:
if child.nodeType == child.TEXT_NODE:
out.append(child.data)
else:
out.append(_get_children_as_string(child.childNodes))
return ''.join(out)
def _get_children_as_string_rxp(node):
    """RXP variant of _get_children_as_string().

    Recursively concatenate every plain-string child found under the RXP
    node children list *node*.
    """
    pieces = []
    if node:
        for kid in node:
            if type(kid) is str:
                pieces.append(kid)
            else:
                pieces.append(_get_children_as_string_rxp(kid[CHILDREN]))
    return ''.join(pieces)
def _wash_indicators(*indicators):
"""
Washes the values of the indicators. An empty string or an
underscore is replaced by a blank space.
@param indicators: a series of indicators to be washed
@return: a list of washed indicators
"""
return [indicator in ('', '_') and ' ' or indicator
for indicator in indicators]
def _correct_record(record):
    """
    Checks and corrects the structure of the record.

    Tags equal to '!' (missing) or outside the valid range are moved under
    the dummy tag '000'; '!' subfield codes and indicators are blanked.
    The record is modified in place.
    @param record: the record data structure (tag -> list of field tuples,
        each field being (subfields, ind1, ind2, value, global position))
    @return: a list of errors found
    """
    # NOTE(review): this iterates record.keys() while popping/reassigning
    # entries — fine under Python 2 where keys() returns a list, but under
    # Python 3 it would need list(record.keys()). Confirm target version.
    errors = []
    for tag in record.keys():
        upper_bound = '999'
        n = len(tag)
        # Tags longer than 3 chars get a zero-padded upper bound so the
        # lexicographic range check below still applies.
        if n > 3:
            i = n - 3
            while i > 0:
                upper_bound = '%s%s' % ('0', upper_bound)
                i -= 1
        # Missing tag. Replace it with dummy tag '000'.
        if tag == '!':
            errors.append((1, '(field number(s): ' +
                str([f[4] for f in record[tag]]) + ')'))
            record['000'] = record.pop(tag)
            tag = '000'
        elif not ('001' <= tag <= upper_bound or tag in ('FMT', 'FFT', 'BDR', 'BDM')):
            errors.append(2)
            record['000'] = record.pop(tag)
            tag = '000'
        fields = []
        for field in record[tag]:
            # Datafield without any subfield.
            if field[0] == [] and field[3] == '':
                errors.append((8, '(field number: ' + str(field[4]) + ')'))
            subfields = []
            for subfield in field[0]:
                # '!' marks a missing subfield code; blank it out.
                if subfield[0] == '!':
                    errors.append((3, '(field number: ' + str(field[4]) + ')'))
                    newsub = ('', subfield[1])
                else:
                    newsub = subfield
                subfields.append(newsub)
            # '!' indicators are replaced by a single blank.
            if field[1] == '!':
                errors.append((4, '(field number: ' + str(field[4]) + ')'))
                ind1 = " "
            else:
                ind1 = field[1]
            if field[2] == '!':
                errors.append((5, '(field number: ' + str(field[4]) + ')'))
                ind2 = " "
            else:
                ind2 = field[2]
            fields.append((subfields, ind1, ind2, field[3], field[4]))
        record[tag] = fields
    return errors
def _warning(code):
    """Return the warning message associated with *code*.

    If *code* is a (cd, text) tuple, return the message registered for cd
    with *text* appended. A plain string is returned unchanged.
    """
    if isinstance(code, str):
        return code
    suffix = ''
    if isinstance(code, tuple) and isinstance(code[0], str):
        suffix = code[1]
        code = code[0]
    return CFG_BIBRECORD_WARNING_MSGS.get(code, '') + suffix
def _warnings(alist):
    """Map _warning() over every element of *alist* and return the results."""
    return list(map(_warning, alist))
def _compare_lists(list1, list2, custom_cmp):
"""Compares twolists using given comparing function
@param list1: first list to compare
@param list2: second list to compare
@param custom_cmp: a function taking two arguments (element of
list 1, element of list 2) and
@return: True or False depending if the values are the same"""
if len(list1) != len(list2):
return False
for element1, element2 in zip(list1, list2):
if not custom_cmp(element1, element2):
return False
return True
|
unknown
|
codeparrot/codeparrot-clean
| ||
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
class DeviceStorageTestCommon(object):
    """Shared Marionette helpers for exercising the device's sdcard storage.

    Each helper executes asynchronous JavaScript in the device context via
    self.marionette and returns the value passed to marionetteScriptFinished.
    """
    def is_sdcard_available(self):
        # Returns True when the sdcard reports "available"; False when it is
        # unavailable/shared; on error, the DOM error name string.
        ret_check_sdcard = self.marionette.execute_async_script("""
        var request = window.wrappedJSObject.sdcard.available();
        var flag = false;
        request.onsuccess = function () {
            if (this.result == "available") {
                flag = true;
            } else if (this.result == "unavailable" ||
                       this.result == "shared") {
                console.log("sdcard is either unavailable or " +
                            "usb storage is enabled")
                flag = false;
            }
            marionetteScriptFinished(flag);
        };
        request.onerror = function () {
            flag = this.error.name;
            console.log("Unable to get the space used by the sdcard " + flag)
            marionetteScriptFinished(flag);
        };
        """, script_timeout=10000)
        return ret_check_sdcard
    def add_namedfile_sdcard(self, file_name, file_contents):
        # Creates a text/plain Blob with file_contents and stores it on the
        # sdcard under file_name; True on success, an error string otherwise.
        ret_namedfile_sdcard = self.marionette.execute_async_script("""
        var file_name = arguments[0];
        var file_contents = arguments[1]
        //create a file with contents
        var file = new Blob([file_contents], {type: "text/plain"});
        var request = window.wrappedJSObject.sdcard.addNamed(file, file_name);
        request.onsuccess = function () {
            var name = this.result;
            marionetteScriptFinished(true);
        };
        request.onerror = function () {
            console.log("Unable to write the file: " + this.error.name);
            marionetteScriptFinished("Unable to write the file: " + this.error.name);
        };
        """, script_args=[file_name, file_contents])
        return ret_namedfile_sdcard
    def get_file_sdcard(self, file_name):
        # Looks file_name up on the sdcard; True when found, False otherwise.
        get_filename_sdcard = self.marionette.execute_async_script("""
        var file_name = arguments[0];
        var request = window.wrappedJSObject.sdcard.get(file_name);
        request.onsuccess = function () {
            //file name will be stored in this.result.name
            marionetteScriptFinished(true);
        };
        request.onerror = function () {
            console.log("Unable to get the file: " + this.error.name);
            marionetteScriptFinished(false);
        };
        """, script_args=[file_name])
        return get_filename_sdcard
    def delete_file_sdcard(self, file_name):
        # Deletes file_name from the sdcard; True on success, an error string
        # otherwise.
        ret_file_delete_sdcard = self.marionette.execute_async_script("""
        var delete_file = arguments[0];
        var request = window.wrappedJSObject.sdcard.delete(delete_file);
        request.onsuccess = function () {
            marionetteScriptFinished(true);
        }
        request.onerror = function (error) {
            console.log('Unable to remove the file: ' + this.error.name);
            marionetteScriptFinished('Unable to remove the file: ' + this.error.name);
        }
        """, script_args=[file_name])
        return ret_file_delete_sdcard
    def enumerate_files_sdcard(self):
        # Walks the sdcard's enumeration cursor and returns the list of all
        # file names found.
        ret_filelist_sdcard_unicode = self.marionette.execute_async_script("""
        var cursor = window.wrappedJSObject.sdcard.enumerate();
        var file_list = [];
        cursor.onsuccess = function () {
            if (this.result) {
                var file = this.result;
                file_list.push(file.name);
                // Once we found a file we check if there are other results
                // Then we move to the next result, which calls the cursor
                // success possibly with the next file as result.
                this.continue();
            } else {
                marionetteScriptFinished(file_list);
            }
        };
        """, script_timeout=20000)
        return ret_filelist_sdcard_unicode
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf-8
# django imports
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# Main FileBrowser Directory. Relative to site.storage.location.
# DO NOT USE A SLASH AT THE BEGINNING, DO NOT FORGET THE TRAILING SLASH AT THE END.
DIRECTORY = getattr(settings, "FILEBROWSER_DIRECTORY", 'uploads/')
# EXTENSIONS AND FORMATS
# Allowed Extensions for File Upload. Lower case is important.
EXTENSIONS = getattr(settings, "FILEBROWSER_EXTENSIONS", {
    'Image': ['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff'],
    'Document': ['.pdf', '.doc', '.rtf', '.txt', '.xls', '.csv', '.docx'],
    'Video': ['.mov', '.mp4', '.m4v', '.webm', '.wmv', '.mpeg', '.mpg', '.avi', '.rm'],
    'Audio': ['.mp3', '.wav', '.aiff', '.midi', '.m4p']
})
# Define different formats for allowed selections.
# This has to be a subset of EXTENSIONS.
# e.g., add ?type=image to the browse-URL ...
SELECT_FORMATS = getattr(settings, "FILEBROWSER_SELECT_FORMATS", {
    'file': ['Image', 'Document', 'Video', 'Audio'],
    'image': ['Image'],
    'document': ['Document'],
    'media': ['Video', 'Audio'],
})
# VERSIONS
# Directory to Save Image Versions (and Thumbnails). Relative to site.storage.location.
# If no directory is given, versions are stored within the Image directory.
# VERSION URL: VERSIONS_BASEDIR/original_path/originalfilename_versionsuffix.extension
VERSIONS_BASEDIR = getattr(settings, 'FILEBROWSER_VERSIONS_BASEDIR', '_versions')
# Versions Format. Available Attributes: verbose_name, width, height, opts
VERSIONS = getattr(settings, "FILEBROWSER_VERSIONS", {
    'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
    'thumbnail': {'verbose_name': 'Thumbnail (1 col)', 'width': 60, 'height': 60, 'opts': 'crop'},
    'small': {'verbose_name': 'Small (2 col)', 'width': 140, 'height': '', 'opts': ''},
    'medium': {'verbose_name': 'Medium (4col )', 'width': 300, 'height': '', 'opts': ''},
    'big': {'verbose_name': 'Big (6 col)', 'width': 460, 'height': '', 'opts': ''},
    'large': {'verbose_name': 'Large (8 col)', 'width': 680, 'height': '', 'opts': ''},
})
# Quality of saved versions
VERSION_QUALITY = getattr(settings, 'FILEBROWSER_VERSION_QUALITY', 90)
# Versions available within the Admin-Interface.
ADMIN_VERSIONS = getattr(settings, 'FILEBROWSER_ADMIN_VERSIONS', ['thumbnail', 'small', 'medium', 'big', 'large'])
# Which Version should be used as Admin-thumbnail.
ADMIN_THUMBNAIL = getattr(settings, 'FILEBROWSER_ADMIN_THUMBNAIL', 'admin_thumbnail')
# PLACEHOLDER
# Path to placeholder image (relative to storage location)
PLACEHOLDER = getattr(settings, "FILEBROWSER_PLACEHOLDER", "")
# Show Placeholder if the original image does not exist
SHOW_PLACEHOLDER = getattr(settings, "FILEBROWSER_SHOW_PLACEHOLDER", False)
# Always show placeholder (even if the original image exists)
FORCE_PLACEHOLDER = getattr(settings, "FILEBROWSER_FORCE_PLACEHOLDER", False)
# EXTRA SETTINGS
# If set to True, the FileBrowser will not try to import a mis-installed PIL.
STRICT_PIL = getattr(settings, 'FILEBROWSER_STRICT_PIL', False)
# PIL's Error "Suspension not allowed here" work around:
# s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html
IMAGE_MAXBLOCK = getattr(settings, 'FILEBROWSER_IMAGE_MAXBLOCK', 1024 * 1024)
# Exclude files matching any of the following regular expressions
# Default is to exclude 'thumbnail' style naming of image-thumbnails.
# Flatten EXTENSIONS into one list so the default EXCLUDE regex can cover
# every allowed extension.
EXTENSION_LIST = []
for exts in EXTENSIONS.values():
    EXTENSION_LIST += exts
# NOTE(review): the extensions contain a leading '.', which is unescaped in
# this regex and therefore matches any character — presumably acceptable for
# this exclusion pattern, but worth confirming.
EXCLUDE = getattr(settings, 'FILEBROWSER_EXCLUDE', (r'_(%(exts)s)_.*_q\d{1,3}\.(%(exts)s)' % {'exts': ('|'.join(EXTENSION_LIST))},))
# Max. Upload Size in Bytes.
MAX_UPLOAD_SIZE = getattr(settings, "FILEBROWSER_MAX_UPLOAD_SIZE", 10485760)
# Normalize filename and remove all non-alphanumeric characters
# except for underscores, spaces & dashes.
NORMALIZE_FILENAME = getattr(settings, "FILEBROWSER_NORMALIZE_FILENAME", False)
# Convert Filename (replace spaces and convert to lowercase)
CONVERT_FILENAME = getattr(settings, "FILEBROWSER_CONVERT_FILENAME", True)
# Max. Entries per Page
# Loading a Sever-Directory with lots of files might take a while
# Use this setting to limit the items shown
LIST_PER_PAGE = getattr(settings, "FILEBROWSER_LIST_PER_PAGE", 50)
# Default Sorting
# Options: date, filesize, filename_lower, filetype_checked
DEFAULT_SORTING_BY = getattr(settings, "FILEBROWSER_DEFAULT_SORTING_BY", "date")
# Sorting Order: asc, desc
DEFAULT_SORTING_ORDER = getattr(settings, "FILEBROWSER_DEFAULT_SORTING_ORDER", "desc")
# regex to clean dir names before creation
FOLDER_REGEX = getattr(settings, "FILEBROWSER_FOLDER_REGEX", r'^[\w._\ /-]+$')
# Traverse directories when searching
SEARCH_TRAVERSE = getattr(settings, "FILEBROWSER_SEARCH_TRAVERSE", False)
# Default Upload and Version Permissions
DEFAULT_PERMISSIONS = getattr(settings, "FILEBROWSER_DEFAULT_PERMISSIONS", 0o755)
# Overwrite existing files on upload
OVERWRITE_EXISTING = getattr(settings, "FILEBROWSER_OVERWRITE_EXISTING", True)
# UPLOAD
# Directory to Save temporary uploaded files (FileBrowseUploadField)
# Relative to site.storage.location.
UPLOAD_TEMPDIR = getattr(settings, 'FILEBROWSER_UPLOAD_TEMPDIR', '_temp')
# EXTRA TRANSLATION STRINGS
# The following strings are not available within views or templates
# Calling ugettext_lazy here registers the strings with the message
# extraction tooling so they appear in the .po files.
_('Folder')
_('Image')
_('Video')
_('Document')
_('Audio')
|
unknown
|
codeparrot/codeparrot-clean
| ||
import unittest
import os, glob
from test_all import db, test_support, get_new_environment_path, \
get_new_database_path
#----------------------------------------------------------------------
class pget_bugTestCase(unittest.TestCase):
    """Verify that cursor.pget works properly"""
    # On-disk database file name created inside the temporary environment.
    db_name = 'test-cursor_pget.db'
    def setUp(self):
        # Build a fresh BDB environment holding a primary BTree database and
        # a duplicate-allowing secondary database whose keys are the primary
        # values (identity association below).
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        self.primary_db = db.DB(self.env)
        self.primary_db.open(self.db_name, 'primary', db.DB_BTREE, db.DB_CREATE)
        self.secondary_db = db.DB(self.env)
        self.secondary_db.set_flags(db.DB_DUP)
        self.secondary_db.open(self.db_name, 'secondary', db.DB_BTREE, db.DB_CREATE)
        # Secondary key is simply the primary value, so looking up 'eggs' in
        # the secondary yields both 'salad' and 'omelet' primary records.
        self.primary_db.associate(self.secondary_db, lambda key, data: data)
        # NOTE(review): str keys/values imply the Python 2 bsddb API; under
        # Python 3 these would need to be bytes — confirm target version.
        self.primary_db.put('salad', 'eggs')
        self.primary_db.put('spam', 'ham')
        self.primary_db.put('omelet', 'eggs')
    def tearDown(self):
        # Close in dependency order (secondary, primary, env) before removing
        # the on-disk environment.
        self.secondary_db.close()
        self.primary_db.close()
        self.env.close()
        del self.secondary_db
        del self.primary_db
        del self.env
        test_support.rmtree(self.homeDir)
    def test_pget(self):
        # pget on a secondary cursor returns (secondary key, primary key,
        # primary data) triples; DB_NEXT_DUP walks duplicate secondary keys.
        cursor = self.secondary_db.cursor()
        self.assertEqual(('eggs', 'salad', 'eggs'), cursor.pget(key='eggs', flags=db.DB_SET))
        self.assertEqual(('eggs', 'omelet', 'eggs'), cursor.pget(db.DB_NEXT_DUP))
        self.assertEqual(None, cursor.pget(db.DB_NEXT_DUP))
        self.assertEqual(('ham', 'spam', 'ham'), cursor.pget('ham', 'spam', flags=db.DB_SET))
        self.assertEqual(None, cursor.pget(db.DB_NEXT_DUP))
        cursor.close()
def test_suite():
    """Assemble and return the unittest suite for this module."""
    suite = unittest.makeSuite(pget_bugTestCase)
    return suite
if __name__ == '__main__':
    # Running this file directly executes the module's test_suite().
    unittest.main(defaultTest='test_suite')
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/sh
test_description='basic branch output coloring'
. ./test-lib.sh
# Create a current branch (main), a local branch (other) and a remote-tracking
# ref so all three color categories appear in the listing.
test_expect_success 'set up some sample branches' '
	test_commit foo &&
	git branch -M main &&
	git update-ref refs/remotes/origin/main HEAD &&
	git update-ref refs/heads/other HEAD
'
# choose non-default colors to make sure config
# is taking effect
test_expect_success 'set up some color config' '
	git config color.branch.local blue &&
	git config color.branch.remote yellow &&
	git config color.branch.current cyan
'
# test_decode_color turns the raw escape sequences into readable
# <COLOR>/<RESET> tokens so the expected output can be written literally.
test_expect_success 'regular output shows colors' '
	cat >expect <<-\EOF &&
	* <CYAN>main<RESET>
	  <BLUE>other<RESET>
	  <YELLOW>remotes/origin/main<RESET>
	EOF
	git branch --color -a >actual.raw &&
	test_decode_color <actual.raw >actual &&
	test_cmp expect actual
'
test_expect_success 'verbose output shows colors' '
	oid=$(git rev-parse --short HEAD) &&
	cat >expect <<-EOF &&
	* <CYAN>main <RESET> $oid foo
	  <BLUE>other <RESET> $oid foo
	  <YELLOW>remotes/origin/main<RESET> $oid foo
	EOF
	git branch --color -v -a >actual.raw &&
	test_decode_color <actual.raw >actual &&
	test_cmp expect actual
'
test_done
|
unknown
|
github
|
https://github.com/git/git
|
t/t3205-branch-color.sh
|
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx
def test_valid_degree_sequence1():
    """Degree sequences of G(n, p) random graphs pass both graphical tests."""
    n = 100
    p = .3
    for _ in range(10):
        graph = nx.erdos_renyi_graph(n, p)
        degrees = list(graph.degree().values())
        assert_true(nx.is_valid_degree_sequence(degrees, method='eg'))
        assert_true(nx.is_valid_degree_sequence(degrees, method='hh'))
def test_valid_degree_sequence2():
    """Degree sequences of Barabasi-Albert graphs pass both graphical tests."""
    n = 100
    for _ in range(10):
        graph = nx.barabasi_albert_graph(n, 1)
        degrees = list(graph.degree().values())
        assert_true(nx.is_valid_degree_sequence(degrees, method='eg'))
        assert_true(nx.is_valid_degree_sequence(degrees, method='hh'))
@raises(nx.NetworkXException)
def test_string_input():
    """An unknown method name raises NetworkXException."""
    nx.is_valid_degree_sequence([], 'foo')
def test_negative_input():
    """Negative or non-integral degrees are rejected."""
    for sequence, method in (([-1], 'hh'), ([-1], 'eg'), ([72.5], 'eg')):
        assert_false(nx.is_valid_degree_sequence(sequence, method))
class TestAtlas(object):
    """Every graph in the small-graph atlas has a graphical degree sequence."""
    @classmethod
    def setupClass(cls):
        # The atlas module is imported lazily into a module-level global so
        # the whole class can be skipped where it is unavailable.
        global atlas
        import platform
        if platform.python_implementation()=='Jython':
            raise SkipTest('graph atlas not available under Jython.')
        import networkx.generators.atlas as atlas
    def setUp(self):
        # Materialize the full atlas graph list once per test.
        self.GAG=atlas.graph_atlas_g()
    def test_atlas(self):
        for graph in self.GAG:
            deg = list(graph.degree().values())
            assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
            assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
def test_small_graph_true():
    """Known graphical sequences pass both the HH and EG tests."""
    graphical_sequences = (
        [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1],
        [10, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2],
        [1, 1, 1, 1, 1, 2, 2, 2, 3, 4],
    )
    for z in graphical_sequences:
        assert_true(nx.is_valid_degree_sequence(z, method='hh'))
        assert_true(nx.is_valid_degree_sequence(z, method='eg'))
def test_small_graph_false():
    """Known non-graphical sequences fail both the HH and EG tests."""
    nongraphical_sequences = (
        [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1],
        [6, 5, 4, 4, 2, 1, 1, 1],
        [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4],
    )
    for z in nongraphical_sequences:
        assert_false(nx.is_valid_degree_sequence(z, method='hh'))
        assert_false(nx.is_valid_degree_sequence(z, method='eg'))
def test_directed_degree_sequence():
    """In/out degree sequences of random digraphs are digraphical."""
    n, r = 100, 10
    p = 1.0 / r
    for i in range(r):
        graph = nx.erdos_renyi_graph(n, p * (i + 1), None, True)
        in_degrees = list(graph.in_degree().values())
        out_degrees = list(graph.out_degree().values())
        assert_true(nx.is_digraphical(in_degrees, out_degrees))
def test_small_directed_sequences():
    """Spot checks for is_digraphical() on small in/out degree sequences."""
    dout=[5,3,3,3,3,2,2,2,1,1,1]
    din=[3,3,3,3,3,2,2,2,2,2,1]
    assert_true(nx.is_digraphical(din, dout))
    # Test nongraphical directed sequence
    dout = [1000,3,3,3,3,2,2,2,1,1,1]
    din=[103,102,102,102,102,102,102,102,102,102]
    assert_false(nx.is_digraphical(din, dout))
    # Test digraphical small sequence
    dout=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    din=[2, 2, 2, 2, 2, 2, 2, 2, 1, 1]
    assert_true(nx.is_digraphical(din, dout))
    # Test nonmatching sum
    # (note: dout is reused from the previous case here and below)
    din=[2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1]
    assert_false(nx.is_digraphical(din, dout))
    # Test for negative integer in sequence
    din=[2, 2, 2, -2, 2, 2, 2, 2, 1, 1, 4]
    assert_false(nx.is_digraphical(din, dout))
def test_multi_sequence():
    """Spot checks for is_multigraphical() on small sequences."""
    cases = (
        ([1000, 3, 3, 3, 3, 2, 2, 2, 1, 1], False),    # degree too large
        ([6, 5, 4, 4, 2, 1, 1, 1], True),              # graphical as multigraph
        ([6, 5, 4, -4, 2, 1, 1, 1], False),            # negative entry
        ([1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4], False),    # odd degree sum
    )
    for seq, expected in cases:
        if expected:
            assert_true(nx.is_multigraphical(seq))
        else:
            assert_false(nx.is_multigraphical(seq))
def test_pseudo_sequence():
    """Spot checks for is_pseudographical() on small sequences."""
    # Valid pseudographical sequence (self-loops/multiedges permitted).
    assert_true(nx.is_pseudographical([1000, 3, 3, 3, 3, 2, 2, 2, 1, 1]))
    # Odd degree sum is never realizable.
    assert_false(nx.is_pseudographical([1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]))
    # Negative entries are rejected.
    assert_false(nx.is_pseudographical([1000, 3, 3, 3, 3, 2, 2, -2, 1, 1]))
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql
import (
"context"
"fmt"
"slices"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/clusterunique"
"github.com/cockroachdb/cockroach/pkg/sql/exprutil"
"github.com/cockroachdb/cockroach/pkg/sql/isql"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/asof"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/idxtype"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sessionprotectedts"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
// showFingerprintsNode is the plan node backing SHOW [EXPERIMENTAL_]FINGERPRINTS
// for both table and virtual-cluster fingerprints.
type showFingerprintsNode struct {
	zeroInputPlanNode
	// columns is the result schema; table and tenant variants differ.
	columns colinfo.ResultColumns
	// tableDesc/indexes are populated when fingerprinting a table.
	tableDesc catalog.TableDescriptor
	indexes []catalog.Index
	// tenantSpec is populated when fingerprinting a virtual cluster.
	tenantSpec tenantSpec
	options *resolvedShowTenantFingerprintOptions
	// experimental records whether the EXPERIMENTAL_ syntax was used.
	experimental bool
	run showFingerprintsRun
}
// ShowFingerprints statement fingerprints the data in each index of a table.
// For each index, a full index scan is run to hash every row with the fnv64
// hash. For the primary index, all table columns are included in the hash,
// whereas for secondary indexes, the index cols + the primary index cols + the
// STORING cols are included. The hashed rows are all combined with XOR using
// distsql.
//
// To extract the fingerprints at some point in the past, the following
// query can be used:
//
//	SELECT * FROM [SHOW FINGERPRINTS FROM TABLE foo] AS OF SYSTEM TIME xxx
func (p *planner) ShowFingerprints(
	ctx context.Context, n *tree.ShowFingerprints,
) (planNode, error) {
	// op is used only to label option evaluation and error messages.
	var op string
	if n.Experimental {
		op = "SHOW EXPERIMENTAL_FINGERPRINTS"
	} else {
		op = "SHOW FINGERPRINTS"
	}
	evalOptions, err := evalShowFingerprintOptions(ctx, n.Options, p.EvalContext(), p.SemaCtx(),
		op, p.ExprEvaluator(op))
	if err != nil {
		return nil, err
	}
	if n.TenantSpec != nil {
		// Tenant fingerprints use the KV fingerprint method and can't exclude columns this way
		if evalOptions.excludedUserColumns != nil {
			err = pgerror.New(pgcode.InvalidParameterValue, "cannot use the EXCLUDE COLUMNS option when fingerprinting a tenant.")
			return nil, err
		}
		return p.planShowTenantFingerprint(ctx, n.TenantSpec, n.Experimental, evalOptions)
	}
	// Only allow this for virtual clusters as it uses the KV fingerprint method instead of SQL
	if !evalOptions.startTimestamp.IsEmpty() {
		err = pgerror.New(pgcode.InvalidParameterValue, "cannot use the START TIMESTAMP option when fingerprinting a table.")
		return nil, err
	}
	// We avoid the cache so that we can observe the fingerprints without
	// taking a lease, like other SHOW commands.
	tableDesc, err := p.ResolveUncachedTableDescriptorEx(
		ctx, n.Table, true /*required*/, tree.ResolveRequireTableDesc)
	if err != nil {
		return nil, err
	}
	if err := p.CheckPrivilege(ctx, tableDesc, privilege.SELECT); err != nil {
		return nil, err
	}
	return &showFingerprintsNode{
		columns: colinfo.ShowFingerprintsColumns,
		tableDesc: tableDesc,
		indexes: tableDesc.ActiveIndexes(),
		options: evalOptions,
		experimental: n.Experimental,
	}, nil
}
// resolvedShowTenantFingerprintOptions holds the evaluated WITH options of a
// SHOW FINGERPRINTS statement.
type resolvedShowTenantFingerprintOptions struct {
	// startTimestamp, when non-empty, requests a fingerprint of all revisions
	// starting from this timestamp (tenant fingerprints only).
	startTimestamp hlc.Timestamp
	// excludedUserColumns lists user columns to omit from the hash (table
	// fingerprints only).
	excludedUserColumns []string
}
// evalShowFingerprintOptions evaluates the WITH options of a SHOW FINGERPRINTS
// statement: an optional START TIMESTAMP expression and an optional list of
// user columns to exclude from the hash.
func evalShowFingerprintOptions(
	ctx context.Context,
	options tree.ShowFingerprintOptions,
	evalCtx *eval.Context,
	semaCtx *tree.SemaContext,
	op string,
	eval exprutil.Evaluator,
) (*resolvedShowTenantFingerprintOptions, error) {
	r := &resolvedShowTenantFingerprintOptions{}
	if options.StartTimestamp != nil {
		// The start timestamp is evaluated like an AS OF SYSTEM TIME expression.
		ts, err := asof.EvalSystemTimeExpr(ctx, evalCtx, semaCtx, options.StartTimestamp, op, asof.ShowTenantFingerprint)
		if err != nil {
			return nil, err
		}
		r.startTimestamp = ts
	}
	if options.ExcludedUserColumns != nil {
		cols, err := eval.StringArray(
			ctx, tree.Exprs(options.ExcludedUserColumns))
		if err != nil {
			return nil, err
		}
		r.excludedUserColumns = cols
	}
	return r, nil
}
// planShowTenantFingerprint builds the plan node for
// SHOW FINGERPRINTS FROM VIRTUAL CLUSTER, after checking that the caller may
// manage tenants and that this cluster can coordinate multi-tenancy.
func (p *planner) planShowTenantFingerprint(
	ctx context.Context,
	ts *tree.TenantSpec,
	experimental bool,
	evalOptions *resolvedShowTenantFingerprintOptions,
) (planNode, error) {
	if err := CanManageTenant(ctx, p); err != nil {
		return nil, err
	}
	if err := rejectIfCantCoordinateMultiTenancy(p.execCfg.Codec, "fingerprint", p.execCfg.Settings); err != nil {
		return nil, err
	}
	// op is used only to label tenant-spec planning errors.
	var op string
	if experimental {
		op = "SHOW EXPERIMENTAL_FINGERPRINTS FROM VIRTUAL CLUSTER"
	} else {
		op = "SHOW FINGERPRINTS FROM VIRTUAL CLUSTER"
	}
	tspec, err := p.planTenantSpec(ctx, ts, op)
	if err != nil {
		return nil, err
	}
	return &showFingerprintsNode{
		columns: colinfo.ShowTenantFingerprintsColumns,
		tenantSpec: tspec,
		options: evalOptions,
		experimental: experimental,
	}, nil
}
// showFingerprintsRun contains the run-time state of
// showFingerprintsNode during local execution.
type showFingerprintsRun struct {
	// rowIdx is the index of the next index (or tenant row) to fingerprint.
	rowIdx int
	// values stores the current row, updated by Next().
	values []tree.Datum
}
// startExec sizes the output row to match the chosen result schema (4 columns
// for tenant fingerprints, 2 for table fingerprints) and emits a deprecation
// notice for the EXPERIMENTAL_ syntax.
func (n *showFingerprintsNode) startExec(params runParams) error {
	if n.tenantSpec != nil {
		n.run.values = []tree.Datum{tree.DNull, tree.DNull, tree.DNull, tree.DNull}
		return nil
	}
	if n.experimental {
		params.p.BufferClientNotice(
			params.ctx,
			pgnotice.Newf("SHOW EXPERIMENTAL_FINGERPRINTS is deprecated. Use SHOW FINGERPRINTS instead."),
		)
	}
	n.run.values = []tree.Datum{tree.DNull, tree.DNull}
	return nil
}
// protectTenantSpanWithSession creates a protected timestamp record
// for the given tenant ID at the read timestamp of the current
// transaction. The PTS record will be tied to the given sessionID.
//
// The caller should call the returned cleanup function to release the
// PTS record.
func protectTenantSpanWithSession(
	ctx context.Context,
	execCfg *ExecutorConfig,
	tenantID roachpb.TenantID,
	sessionID clusterunique.ID,
	tsToProtect hlc.Timestamp,
) (func(), error) {
	ptsRecordID := uuid.MakeV4()
	ptsRecord := sessionprotectedts.MakeRecord(
		ptsRecordID,
		// TODO(ssd): The type here seems weird. I think this
		// is correct in that we use this to compare against
		// the session_id table which returns the stringified
		// session ID. But, maybe we can make this clearer.
		[]byte(sessionID.String()),
		tsToProtect,
		ptpb.MakeTenantsTarget([]roachpb.TenantID{tenantID}),
	)
	log.Dev.Infof(ctx, "protecting timestamp: %#+v", ptsRecord)
	if err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error {
		pts := execCfg.ProtectedTimestampProvider.WithTxn(txn)
		return pts.Protect(ctx, ptsRecord)
	}); err != nil {
		return nil, err
	}
	// The release runs in its own transaction; failure to release is logged
	// rather than returned since the fingerprint result is already produced.
	releasePTS := func() {
		if err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error {
			pts := execCfg.ProtectedTimestampProvider.WithTxn(txn)
			return pts.Release(ctx, ptsRecordID)
		}); err != nil {
			log.Dev.Warningf(ctx, "failed to release protected timestamp %s: %v", ptsRecordID, err)
		}
	}
	return releasePTS, nil
}
// nextTenant produces the single result row of
// SHOW FINGERPRINTS FROM VIRTUAL CLUSTER: it protects the span being read,
// fingerprints the tenant's keyspace via the KV fingerprint method, and
// fills n.run.values with (name, start time, end time, fingerprint).
func (n *showFingerprintsNode) nextTenant(params runParams) (bool, error) {
	// Tenant fingerprinting yields exactly one row.
	if n.run.rowIdx > 0 {
		return false, nil
	}
	tinfo, err := n.tenantSpec.getTenantInfo(params.ctx, params.p)
	if err != nil {
		return false, err
	}
	tid, err := roachpb.MakeTenantID(tinfo.ID)
	if err != nil {
		return false, err
	}
	// We want to write a protected timestamp record at the earliest timestamp
	// that the fingerprint query is going to read from. When fingerprinting
	// revisions, this will be the specified start time.
	tsToProtect := params.p.EvalContext().Txn.ReadTimestamp()
	if n.options != nil && !n.options.startTimestamp.IsEmpty() {
		if !n.options.startTimestamp.LessEq(tsToProtect) {
			return false, pgerror.Newf(pgcode.InvalidParameterValue, `start timestamp %s is greater than the end timestamp %s`,
				n.options.startTimestamp.String(), tsToProtect.String())
		}
		tsToProtect = n.options.startTimestamp
	}
	cleanup, err := protectTenantSpanWithSession(
		params.ctx,
		params.p.ExecCfg(),
		tid,
		params.p.ExtendedEvalContext().SessionID,
		tsToProtect,
	)
	if err != nil {
		return false, err
	}
	defer cleanup()
	// A non-empty start timestamp switches the fingerprint to cover all
	// revisions since that time rather than just the latest values.
	var startTime hlc.Timestamp
	var allRevisions bool
	if n.options != nil && !n.options.startTimestamp.IsEmpty() {
		startTime = n.options.startTimestamp
		allRevisions = true
	}
	// TODO(dt): remove conditional if we make MakeTenantSpan do this.
	span := keys.MakeTenantSpan(tid)
	if tid.IsSystem() {
		span = roachpb.Span{Key: keys.TableDataMin, EndKey: keys.TableDataMax}
	}
	fingerprint, err := params.p.FingerprintSpan(params.ctx,
		span,
		startTime,
		allRevisions,
		false /* stripped */)
	if err != nil {
		return false, err
	}
	endTime := hlc.Timestamp{
		WallTime: params.p.EvalContext().GetTxnTimestamp(time.Microsecond).UnixNano(),
	}
	n.run.values[0] = tree.NewDString(string(tinfo.Name))
	// The start-time column stays NULL unless revisions were requested.
	if !startTime.IsEmpty() {
		n.run.values[1] = eval.TimestampToDecimalDatum(startTime)
	}
	n.run.values[2] = eval.TimestampToDecimalDatum(endTime)
	n.run.values[3] = tree.NewDInt(tree.DInt(fingerprint))
	n.run.rowIdx++
	return true, nil
}
// Next produces one (index name, fingerprint) row per non-inverted index of
// the table by running the generated fingerprint query inside the current
// transaction; tenant fingerprints are delegated to nextTenant.
func (n *showFingerprintsNode) Next(params runParams) (bool, error) {
	if n.tenantSpec != nil {
		return n.nextTenant(params)
	}
	if n.run.rowIdx >= len(n.indexes) {
		return false, nil
	}
	index := n.indexes[n.run.rowIdx]
	// Skip inverted indexes. Experimental fingerprint uses a query that forces
	// the use of an index and that is incompatible with inverted indexes.
	if index.GetType() == idxtype.INVERTED {
		n.run.rowIdx++
		return n.Next(params)
	}
	excludedColumns := []string{}
	if n.options != nil && len(n.options.excludedUserColumns) > 0 {
		excludedColumns = append(excludedColumns, n.options.excludedUserColumns...)
	}
	var sql string
	var err error
	if n.experimental {
		sql, err = BuildExperimentalFingerprintQueryForIndex(n.tableDesc, index, excludedColumns)
	} else {
		sql, err = BuildFingerprintQueryForIndex(n.tableDesc, index, excludedColumns)
	}
	if err != nil {
		return false, err
	}
	// If we're in an AOST context, propagate it to the inner statement so that
	// the inner statement gets planned with planner.avoidLeasedDescriptors set,
	// like the outer one.
	if params.p.EvalContext().AsOfSystemTime != nil {
		ts := params.p.txn.ReadTimestamp()
		sql = sql + " AS OF SYSTEM TIME " + ts.AsOfSystemTime()
	}
	fingerprintCols, err := params.p.InternalSQLTxn().QueryRowEx(
		params.ctx, "hash-fingerprint",
		params.p.txn,
		sessiondata.NodeUserSessionDataOverride,
		sql,
	)
	if err != nil {
		return false, err
	}
	if len(fingerprintCols) != 1 {
		return false, errors.AssertionFailedf(
			"unexpected number of columns returned: 1 vs %d",
			len(fingerprintCols))
	}
	fingerprint := fingerprintCols[0]
	n.run.values[0] = tree.NewDString(index.GetName())
	n.run.values[1] = fingerprint
	n.run.rowIdx++
	return true, nil
}
// makeColumnNameOrExpr renders a column for use in a fingerprint query:
// expression index columns are emitted as their parenthesized compute
// expression, regular columns as a quoted column name.
func makeColumnNameOrExpr(col catalog.Column) string {
	if !col.IsExpressionIndexColumn() {
		name := col.GetName()
		return tree.NameStringP(&name)
	}
	return fmt.Sprintf("(%s)", col.GetComputeExpr())
}
// addColumnsForIndex invokes addColumn for every column an index's
// fingerprint must cover: all public columns for the primary index; key,
// key-suffix (implicit primary key) and STORING columns for secondary
// indexes.
func addColumnsForIndex(
	index catalog.Index, tableDesc catalog.TableDescriptor, addColumn func(catalog.Column),
) error {
	if index.Primary() {
		for _, col := range tableDesc.PublicColumns() {
			addColumn(col)
		}
	} else {
		for i := 0; i < index.NumKeyColumns(); i++ {
			col, err := catalog.MustFindColumnByID(tableDesc, index.GetKeyColumnID(i))
			if err != nil {
				return err
			}
			addColumn(col)
		}
		for i := 0; i < index.NumKeySuffixColumns(); i++ {
			col, err := catalog.MustFindColumnByID(tableDesc, index.GetKeySuffixColumnID(i))
			if err != nil {
				return err
			}
			addColumn(col)
		}
		for i := 0; i < index.NumSecondaryStoredColumns(); i++ {
			col, err := catalog.MustFindColumnByID(tableDesc, index.GetStoredColumnID(i))
			if err != nil {
				return err
			}
			addColumn(col)
		}
	}
	return nil
}
// Experimental version is still provided to ease testing transition. Once we no longer
// reference this in tests, we should delete this code.
//
// BuildExperimentalFingerprintQueryForIndex builds the legacy SQL query that
// XORs the fnv64 hash of every row of the given index, skipping any column
// named in ignoredColumns.
func BuildExperimentalFingerprintQueryForIndex(
	tableDesc catalog.TableDescriptor, index catalog.Index, ignoredColumns []string,
) (string, error) {
	cols := make([]string, 0, len(tableDesc.PublicColumns()))
	var numBytesCols int
	addColumn := func(col catalog.Column) {
		if slices.Contains(ignoredColumns, col.GetName()) {
			return
		}
		colNameOrExpr := makeColumnNameOrExpr(col)
		// TODO(dan): This is known to be a flawed way to fingerprint. Any datum
		// with the same string representation is fingerprinted the same, even
		// if they're different types.
		switch col.GetType().Family() {
		case types.BytesFamily:
			cols = append(cols, fmt.Sprintf("%s:::bytes", colNameOrExpr))
			numBytesCols++
		case types.StringFamily:
			cols = append(cols, fmt.Sprintf("%s:::string", colNameOrExpr))
		default:
			cols = append(cols, fmt.Sprintf("%s::string", colNameOrExpr))
		}
	}
	if err := addColumnsForIndex(index, tableDesc, addColumn); err != nil {
		return "", err
	}
	if len(cols) != numBytesCols && numBytesCols != 0 {
		// Currently, cols has a mix of BYTES and STRING types, but fnv64
		// requires all arguments to be of the same type. We'll cast less
		// frequent type to the other.
		from, to := "::bytes", "::string"
		if numBytesCols > len(cols)/2 {
			// BYTES is more frequent.
			from, to = "::string", "::bytes"
		}
		for i := range cols {
			if strings.HasSuffix(cols[i], from) {
				cols[i] = cols[i] + to
			}
		}
	}
	// The fnv64 hash was chosen mostly due to speed. I did an AS OF SYSTEM TIME
	// fingerprint over 31GiB on a 4 node production cluster (with no other
	// traffic to try and keep things comparable). The cluster was restarted in
	// between each run. Resulting times:
	//
	//  fnv => 17m
	//  sha512 => 1h6m
	//  sha265 => 1h6m
	//  fnv64 (again) => 17m
	//
	// TODO(dan): If/when this ever loses its EXPERIMENTAL prefix and gets
	// exposed to users, consider adding a version to the fingerprint output.
	sql := fmt.Sprintf(`SELECT
	  xor_agg(fnv64(%s))::string AS fingerprint
	  FROM [%d AS t]@{FORCE_INDEX=[%d]}
	`, strings.Join(cols, `,`), tableDesc.GetID(), index.GetID())
	// Partial indexes only contain rows matching their predicate.
	if index.IsPartial() {
		sql = fmt.Sprintf("%s WHERE %s", sql, index.GetPredicate())
	}
	return sql, nil
}
// BuildFingerprintQueryForIndex returns a SQL query that fingerprints the
// given index by xor-aggregating fnv64 over crdb_internal.datums_to_bytes of
// each row's columns (minus ignoredColumns), forcing the scan to use the
// index. Unlike the experimental variant above, datums_to_bytes gives a
// type-aware encoding, so distinct datums with equal string representations
// do not collide.
func BuildFingerprintQueryForIndex(
	tableDesc catalog.TableDescriptor, index catalog.Index, ignoredColumns []string,
) (string, error) {
	cols := make([]string, 0, len(tableDesc.PublicColumns()))
	addColumn := func(col catalog.Column) {
		if slices.Contains(ignoredColumns, col.GetName()) {
			return
		}
		cols = append(cols, makeColumnNameOrExpr(col))
	}
	if err := addColumnsForIndex(index, tableDesc, addColumn); err != nil {
		return "", err
	}
	// The fnv64 hash was chosen mostly due to speed. I did an AS OF SYSTEM TIME
	// fingerprint over 31GiB on a 4 node production cluster (with no other
	// traffic to try and keep things comparable). The cluster was restarted in
	// between each run. Resulting times:
	//
	//  fnv => 17m
	//  sha512 => 1h6m
	//  sha256 => 1h6m
	//  fnv64 (again) => 17m
	//
	// lpad/to_hex renders the aggregate as a fixed-width 16-char hex string.
	sql := fmt.Sprintf(
		`SELECT lpad(to_hex(xor_agg(fnv64(crdb_internal.datums_to_bytes(%s):::BYTES))), 16, '0') AS fingerprint
		FROM [%d AS t]@{FORCE_INDEX=[%d]}`,
		strings.Join(cols, `,`),
		tableDesc.GetID(),
		index.GetID(),
	)
	if index.IsPartial() {
		// A partial index only indexes rows matching its predicate; restrict
		// the fingerprint to the same rows.
		sql = fmt.Sprintf("%s WHERE %s", sql, index.GetPredicate())
	}
	return sql, nil
}
// Values returns the row produced by the most recent Next call:
// [index name, fingerprint].
func (n *showFingerprintsNode) Values() tree.Datums {
	return n.run.values
}
// Close is a no-op: this node holds no resources that need releasing.
func (n *showFingerprintsNode) Close(_ context.Context) {
}
|
go
|
github
|
https://github.com/cockroachdb/cockroach
|
pkg/sql/show_fingerprints.go
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import ManagedPrivateEndpointsClientConfiguration
from .operations import ManagedPrivateEndpointsOperations
from . import models
class ManagedPrivateEndpointsClient(object):
    """Client for the Synapse managed private endpoints service.

    :ivar managed_private_endpoints: ManagedPrivateEndpointsOperations operations
    :vartype managed_private_endpoints: azure.synapse.managedprivateendpoints.operations.ManagedPrivateEndpointsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param endpoint: The workspace development endpoint, for example https://myworkspace.dev.azuresynapse.net.
    :type endpoint: str
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        endpoint,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        self._config = ManagedPrivateEndpointsClientConfiguration(credential, endpoint, **kwargs)
        # '{endpoint}' is substituted per-request in _send_request.
        self._client = PipelineClient(base_url='{endpoint}', config=self._config, **kwargs)

        # Register every model class for (de)serialization.
        client_models = dict(
            (name, obj) for name, obj in models.__dict__.items() if isinstance(obj, type)
        )
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        self.managed_private_endpoints = ManagedPrivateEndpointsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, http_request, **kwargs):
        # type: (HttpRequest, Any) -> HttpResponse
        """Run *http_request* through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.HttpResponse
        """
        # Fill the '{endpoint}' placeholder in the request URL with the
        # configured workspace endpoint (unquoted, it is already a URL).
        http_request.url = self._client.format_url(
            http_request.url,
            endpoint=self._serialize.url(
                "self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        )
        pipeline_response = self._client._pipeline.run(
            http_request, stream=kwargs.pop("stream", True), **kwargs)
        return pipeline_response.http_response

    def close(self):
        # type: () -> None
        """Close the underlying transport session."""
        self._client.close()

    def __enter__(self):
        # type: () -> ManagedPrivateEndpointsClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        parameters, # type: "_models.VirtualNetworkGateway"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VirtualNetworkGateway"
        """Issue the initial PUT request of the create-or-update LRO.

        Returns the deserialized ``VirtualNetworkGateway`` from a 200/201
        response; any other status raises ``HttpResponseError``. The public
        ``begin_create_or_update`` passes ``cls=lambda x, y, z: x`` so the raw
        pipeline response is returned for the poller instead.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied overrides take precedence over the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry the same payload shape.
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        parameters, # type: "_models.VirtualNetworkGateway"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
        """Creates or updates a virtual network gateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Parameters supplied to create or update virtual network gateway operation.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Start the operation; the identity `cls` keeps the raw pipeline
            # response so the poller can drive the LRO itself.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial request and must not leak
        # into the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> ARM LRO polling; False -> no polling; anything else
        # is treated as a caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VirtualNetworkGateway"
        """Gets the specified virtual network gateway by resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualNetworkGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied overrides take precedence over the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for a plain GET; everything else is mapped to
        # a typed error or a generic HttpResponseError.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
    def _delete_initial(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Issue the initial DELETE request of the delete LRO.

        Accepts 200/202/204; any other status raises ``HttpResponseError``.
        Returns ``None`` unless a ``cls`` callback is supplied (``begin_delete``
        passes the identity lambda to capture the raw pipeline response).
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied overrides take precedence over the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified virtual network gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Start the operation; the identity `cls` keeps the raw pipeline
            # response so the poller can drive the LRO itself.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial request and must not leak
        # into the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no payload; return None unless a cls callback exists.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> ARM LRO polling (final state via Location header);
        # False -> no polling; anything else is a caller-provided PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
    def _update_tags_initial(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["_models.VirtualNetworkGateway"]
        """Issue the initial PATCH request of the update-tags LRO.

        A 200 response carries the updated ``VirtualNetworkGateway``; a 202
        (accepted, still in progress) yields ``None``. Any other status raises
        ``HttpResponseError``. ``begin_update_tags`` passes an identity ``cls``
        to capture the raw pipeline response for the poller.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualNetworkGateway"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied overrides take precedence over the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 202 (accepted) has no body yet, so deserialized stays None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
    def begin_update_tags(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
        """Updates a virtual network gateway tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Parameters supplied to update virtual network gateway tags.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Start the operation; the identity `cls` keeps the raw pipeline
            # response so the poller can drive the LRO itself.
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial request and must not leak
        # into the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> ARM LRO polling; False -> no polling; anything else
        # is treated as a caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
    def list(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.VirtualNetworkGatewayListResult"]
        """Gets all virtual network gateways by resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualNetworkGatewayListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGatewayListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied overrides take precedence over the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the operation URL for the first page, or
            # the service-provided next_link (already fully formed) afterwards.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Pull one page's items and the link to the next page (if any).
            deserialized = self._deserialize('VirtualNetworkGatewayListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page, raising on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'} # type: ignore
    def list_connections(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.VirtualNetworkGatewayListConnectionsResult"]
        """Gets all the connections in a virtual network gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualNetworkGatewayListConnectionsResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGatewayListConnectionsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayListConnectionsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied overrides take precedence over the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the operation URL for the first page, or
            # the service-provided next_link (already fully formed) afterwards.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_connections.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Pull one page's items and the link to the next page (if any).
            deserialized = self._deserialize('VirtualNetworkGatewayListConnectionsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page, raising on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'} # type: ignore
def _reset_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    gateway_vip=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.VirtualNetworkGateway"]
    """Issue the initial POST that starts a gateway reset.

    Returns the deserialized VirtualNetworkGateway on a 200 response, or
    None when the service answers 202 (operation accepted, still running).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VirtualNetworkGateway"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Expand the operation's URL template with the validated path segments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._reset_initial.metadata['url'], **path_format_arguments)  # type: ignore

    # Query string: optional gateway VIP first, then the API version.
    query_parameters = {}  # type: Dict[str, Any]
    if gateway_vip is not None:
        query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 carries a payload; 202 does not.
    deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response) if response.status_code == 200 else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}  # type: ignore
def begin_reset(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    gateway_vip=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
    """Resets the primary of the virtual network gateway in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param gateway_vip: Virtual network gateway vip address supplied to the begin reset of the
     active-active feature enabled gateway.
    :type gateway_vip: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    # 'cls' is popped here so the initial call receives a raw-response
    # passthrough lambda instead; the user's cls runs after polling finishes.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial reset request now.
        raw_result = self._reset_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            gateway_vip=gateway_vip,
            cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
            **kwargs
        )
    # Only meaningful for the initial request; drop before building the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the model type.
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This operation signals completion via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}  # type: ignore
def _reset_vpn_client_shared_key_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial POST that resets the gateway's VPN client shared key.

    The request has no body and the response carries no payload; a 200 or
    202 status simply acknowledges that the operation was started.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Expand the URL template directly with the validated path segments.
    url = self._client.format_url(
        self._reset_vpn_client_shared_key_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        virtualNetworkGatewayName=self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_reset_vpn_client_shared_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'}  # type: ignore
def begin_reset_vpn_client_shared_key(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Resets the VPN client shared key of the virtual network gateway in the specified resource
    group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    # Pop 'cls' so the initial call gets a raw-response passthrough lambda;
    # the user's cls is applied only after the LRO completes.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation now.
        raw_result = self._reset_vpn_client_shared_key_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
            **kwargs
        )
    # Only meaningful for the initial request; drop before building the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # No payload to deserialize; returns None unless cls is supplied.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This operation signals completion via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_vpn_client_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'}  # type: ignore
def _generatevpnclientpackage_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VpnClientParameters"
    **kwargs  # type: Any
):
    # type: (...) -> Optional[str]
    """Initial POST for generating a VPN client package.

    On a 200 response the body is the package URL as a plain string; a 202
    (accepted) yields None while the long-running operation proceeds.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[str]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Fill the path placeholders and resolve the final URL.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self._generatevpnclientpackage_initial.metadata['url'],  # type: ignore
        **path_format_arguments
    )

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the request body and send.
    body_content = self._serialize.body(parameters, 'VpnClientParameters')
    request = self._client.post(url, query_parameters, header_parameters, content=body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('str', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_generatevpnclientpackage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}  # type: ignore
def begin_generatevpnclientpackage(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VpnClientParameters"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[str]
    """Generates VPN client package for P2S client of the virtual network gateway in the specified
    resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Parameters supplied to the generate virtual network gateway VPN client
     package operation.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnClientParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    # Pop 'cls' so the initial call gets a raw-response passthrough lambda;
    # the user's cls is applied once polling completes.
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation now.
        raw_result = self._generatevpnclientpackage_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
            **kwargs
        )
    # Only meaningful for the initial request; drop before building the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Terminal payload is the package URL as a plain string.
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This operation signals completion via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}  # type: ignore
def _generate_vpn_profile_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VpnClientParameters"
    **kwargs  # type: Any
):
    # type: (...) -> Optional[str]
    """Initial POST for VPN profile generation (IKEv2 / RADIUS based P2S).

    Returns the profile package URL string on 200, or None on 202 while
    the long-running operation is still in flight.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[str]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the templated operation URL in one step.
    url = self._client.format_url(
        self._generate_vpn_profile_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        virtualNetworkGatewayName=self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the request body and dispatch through the pipeline.
    body_content = self._serialize.body(parameters, 'VpnClientParameters')
    request = self._client.post(url, query_parameters, header_parameters, content=body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('str', pipeline_response) if response.status_code == 200 else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_generate_vpn_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}  # type: ignore
def begin_generate_vpn_profile(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VpnClientParameters"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[str]
    """Generates VPN profile for P2S client of the virtual network gateway in the specified resource
    group. Used for IKEV2 and radius based authentication.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Parameters supplied to the generate virtual network gateway VPN client
     package operation.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnClientParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    # Pop 'cls' so the initial call gets a raw-response passthrough lambda;
    # the user's cls is applied once polling completes.
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation now.
        raw_result = self._generate_vpn_profile_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
            **kwargs
        )
    # Only meaningful for the initial request; drop before building the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Terminal payload is the profile package URL as a plain string.
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This operation signals completion via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}  # type: ignore
def _get_vpn_profile_package_url_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional[str]
    """Initial POST to fetch the pre-generated VPN profile package URL.

    Returns the URL string on a 200 response, or None when the service
    answers 202 and the poller must keep waiting.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[str]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Expand the operation's URL template with the validated path segments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self._get_vpn_profile_package_url_initial.metadata['url'],  # type: ignore
        **path_format_arguments
    )

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('str', pipeline_response) if response.status_code == 200 else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_get_vpn_profile_package_url_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}  # type: ignore
def begin_get_vpn_profile_package_url(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[str]
    """Gets pre-generated VPN profile for P2S client of the virtual network gateway in the specified
    resource group. The profile needs to be generated first using generateVpnProfile.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    # Pop 'cls' so the initial call gets a raw-response passthrough lambda;
    # the user's cls is applied once polling completes.
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation now.
        raw_result = self._get_vpn_profile_package_url_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
            **kwargs
        )
    # Only meaningful for the initial request; drop before building the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Terminal payload is the profile package URL as a plain string.
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This operation signals completion via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}  # type: ignore
def _get_bgp_peer_status_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.BgpPeerStatusListResult"]
    """Initial POST of the GetBgpPeerStatus long-running operation.

    Returns a deserialized BgpPeerStatusListResult on 200, or None while
    the service is still processing (202).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.BgpPeerStatusListResult"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Expand the operation's URL template with the validated path segments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._get_bgp_peer_status_initial.metadata['url'], **path_format_arguments)  # type: ignore

    # Query string: the optional peer filter first, then the API version.
    query_parameters = {}  # type: Dict[str, Any]
    if peer is not None:
        query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response) if response.status_code == 200 else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_get_bgp_peer_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}  # type: ignore
def begin_get_bgp_peer_status(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.BgpPeerStatusListResult"]
    """The GetBgpPeerStatus operation retrieves the status of all BGP peers.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param peer: The IP address of the peer to retrieve the status of.
    :type peer: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either BgpPeerStatusListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.BgpPeerStatusListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop poller-only options first so they are not forwarded to the initial request.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BgpPeerStatusListResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial POST.  ``cls`` is overridden with an
        # identity lambda so the raw PipelineResponse comes back for the poller;
        # the caller's ``cls`` hook is applied later in get_long_running_output.
        raw_result = self._get_bgp_peer_status_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            peer=peer,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response, honoring the captured ``cls``.
        deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Path arguments are needed again by ARMPolling to resolve polling URLs.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This LRO reports its final state via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state (no new initial call).
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}  # type: ignore
def supported_vpn_devices(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> str
    """Gets a xml format representation for supported vpn devices.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: str, or the result of cls(response)
    :rtype: str
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Expand the metadata URL template with the serialized path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self.supported_vpn_devices.metadata['url'], **path_format_arguments)  # type: ignore

    # Query string and headers for the POST.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(request_url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # The payload is a plain string (xml document).
    deserialized = self._deserialize('str', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'}  # type: ignore
def _get_learned_routes_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.GatewayRouteListResult"]
    """Issue the initial POST for the GetLearnedRoutes long-running operation.

    A 200 response carries a GatewayRouteListResult body; a 202 means the
    operation is still running and yields ``None`` (resolved later by polling).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.GatewayRouteListResult"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Expand the metadata URL template with the serialized path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self._get_learned_routes_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(request_url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Only a 200 carries a body.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_get_learned_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}  # type: ignore
def begin_get_learned_routes(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.GatewayRouteListResult"]
    """This operation retrieves a list of routes the virtual network gateway has learned, including
    routes learned from BGP peers.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either GatewayRouteListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.GatewayRouteListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop poller-only options first so they are not forwarded to the initial request.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.GatewayRouteListResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial POST; the identity ``cls`` lambda
        # returns the raw PipelineResponse needed to drive polling.
        raw_result = self._get_learned_routes_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response, honoring the captured ``cls``.
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Path arguments are needed again by ARMPolling to resolve polling URLs.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This LRO reports its final state via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state (no new initial call).
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}  # type: ignore
def _get_advertised_routes_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.GatewayRouteListResult"]
    """Issue the initial POST for the GetAdvertisedRoutes long-running operation.

    A 200 response carries a GatewayRouteListResult body; a 202 means the
    operation is still running and yields ``None`` (resolved later by polling).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.GatewayRouteListResult"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Expand the metadata URL template with the serialized path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self._get_advertised_routes_initial.metadata['url'], **path_format_arguments)  # type: ignore

    # ``peer`` is a required query parameter here (unlike GetBgpPeerStatus).
    query_parameters = {
        'peer': self._serialize.query("peer", peer, 'str'),
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(request_url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Only a 200 carries a body.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_get_advertised_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}  # type: ignore
def begin_get_advertised_routes(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.GatewayRouteListResult"]
    """This operation retrieves a list of routes the virtual network gateway is advertising to the
    specified peer.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param peer: The IP address of the peer.
    :type peer: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either GatewayRouteListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.GatewayRouteListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop poller-only options first so they are not forwarded to the initial request.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.GatewayRouteListResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial POST; the identity ``cls`` lambda
        # returns the raw PipelineResponse needed to drive polling.
        raw_result = self._get_advertised_routes_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            peer=peer,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response, honoring the captured ``cls``.
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Path arguments are needed again by ARMPolling to resolve polling URLs.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This LRO reports its final state via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state (no new initial call).
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}  # type: ignore
def _set_vpnclient_ipsec_parameters_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    vpnclient_ipsec_params,  # type: "_models.VpnClientIPsecParameters"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.VpnClientIPsecParameters"]
    """Issue the initial POST for the SetVpnclientIpsecParameters long-running
    operation, sending the ipsec policy in the request body.

    A 200 response carries the resulting VpnClientIPsecParameters; a 202 means
    the operation is still running and yields ``None``.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VpnClientIPsecParameters"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the metadata URL template with the serialized path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self._set_vpnclient_ipsec_parameters_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the model into the request body.
    body_content = self._serialize.body(vpnclient_ipsec_params, 'VpnClientIPsecParameters')
    request = self._client.post(request_url, query_parameters, header_parameters, content=body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Only a 200 carries a body.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_set_vpnclient_ipsec_parameters_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'}  # type: ignore
def begin_set_vpnclient_ipsec_parameters(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    vpnclient_ipsec_params,  # type: "_models.VpnClientIPsecParameters"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VpnClientIPsecParameters"]
    """The Set VpnclientIpsecParameters operation sets the vpnclient ipsec policy for P2S client of
    virtual network gateway in the specified resource group through Network resource provider.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param vpnclient_ipsec_params: Parameters supplied to the Begin Set vpnclient ipsec parameters
     of Virtual Network Gateway P2S client operation through Network resource provider.
    :type vpnclient_ipsec_params: ~azure.mgmt.network.v2020_07_01.models.VpnClientIPsecParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnClientIPsecParameters or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VpnClientIPsecParameters]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop poller-only options first so they are not forwarded to the initial request.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnClientIPsecParameters"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial POST; the identity ``cls`` lambda
        # returns the raw PipelineResponse needed to drive polling.
        raw_result = self._set_vpnclient_ipsec_parameters_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            vpnclient_ipsec_params=vpnclient_ipsec_params,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response, honoring the captured ``cls``.
        deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Path arguments are needed again by ARMPolling to resolve polling URLs.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This LRO reports its final state via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state (no new initial call).
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'}  # type: ignore
def _get_vpnclient_ipsec_parameters_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VpnClientIPsecParameters"
    """Issue the initial POST for the GetVpnclientIpsecParameters long-running
    operation; only a 200 with a VpnClientIPsecParameters body is accepted.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnClientIPsecParameters"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Expand the metadata URL template with the serialized path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self._get_vpnclient_ipsec_parameters_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(request_url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
_get_vpnclient_ipsec_parameters_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'}  # type: ignore
def begin_get_vpnclient_ipsec_parameters(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VpnClientIPsecParameters"]
    """The Get VpnclientIpsecParameters operation retrieves information about the vpnclient ipsec
    policy for P2S client of virtual network gateway in the specified resource group through
    Network resource provider.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The virtual network gateway name.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnClientIPsecParameters or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VpnClientIPsecParameters]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop poller-only options first so they are not forwarded to the initial request.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnClientIPsecParameters"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial POST; the identity ``cls`` lambda
        # returns the raw PipelineResponse needed to drive polling.
        raw_result = self._get_vpnclient_ipsec_parameters_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response, honoring the captured ``cls``.
        deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Path arguments are needed again by ARMPolling to resolve polling URLs.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # This LRO reports its final state via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state (no new initial call).
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'}  # type: ignore
def vpn_device_configuration_script(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_connection_name,  # type: str
    parameters,  # type: "_models.VpnDeviceScriptParameters"
    **kwargs  # type: Any
):
    # type: (...) -> str
    """Gets a xml format representation for vpn device configuration script.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_connection_name: The name of the virtual network gateway
     connection for which the configuration script is generated.
    :type virtual_network_gateway_connection_name: str
    :param parameters: Parameters supplied to the generate vpn device script operation.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnDeviceScriptParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: str, or the result of cls(response)
    :rtype: str
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the metadata URL template with the serialized path arguments.
    # Note: this operation is addressed by connection name, not gateway name.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self.vpn_device_configuration_script.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the model into the request body.
    body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')
    request = self._client.post(request_url, query_parameters, header_parameters, content=body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # The payload is a plain string (the device configuration script).
    deserialized = self._deserialize('str', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'}  # type: ignore
    def _start_packet_capture_initial(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        parameters=None, # type: Optional["_models.VpnPacketCaptureStartParameters"]
        **kwargs # type: Any
    ):
        # type: (...) -> Optional[str]
        """Send the initial startPacketCapture POST of the long-running operation.

        :param resource_group_name: The name of the resource group.
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :param parameters: Optional packet-capture start parameters; when ``None``
         the request is sent with an empty body.
        :return: The deserialized ``str`` result on 200, or ``None`` on 202
         (operation accepted but not yet finished).
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied mappings extend/override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._start_packet_capture_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        # The request body is optional for this operation.
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'VpnPacketCaptureStartParameters')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Deserialize the service error body (if any) for richer diagnostics.
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = None
        # Only a 200 response carries a deserializable body; 202 yields None.
        if response.status_code == 200:
            deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _start_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/startPacketCapture'} # type: ignore
    def begin_start_packet_capture(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        parameters=None, # type: Optional["_models.VpnPacketCaptureStartParameters"]
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[str]
        """Starts packet capture on virtual network gateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Virtual network gateway packet capture parameters supplied to start packet
        capture on gateway.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnPacketCaptureStartParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either str or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[str]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[str]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request; the identity lambda
            # keeps the raw pipeline response for the poller to consume.
            raw_result = self._start_packet_capture_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; don't forward them
        # to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response of the completed operation.
            deserialized = self._deserialize('str', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/startPacketCapture'} # type: ignore
    def _stop_packet_capture_initial(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        parameters, # type: "_models.VpnPacketCaptureStopParameters"
        **kwargs # type: Any
    ):
        # type: (...) -> Optional[str]
        """Send the initial stopPacketCapture POST of the long-running operation.

        :param resource_group_name: The name of the resource group.
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :param parameters: Required packet-capture stop parameters (request body).
        :return: The deserialized ``str`` result on 200, or ``None`` on 202
         (operation accepted but not yet finished).
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied mappings extend/override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._stop_packet_capture_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'VpnPacketCaptureStopParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Deserialize the service error body (if any) for richer diagnostics.
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = None
        # Only a 200 response carries a deserializable body; 202 yields None.
        if response.status_code == 200:
            deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _stop_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture'} # type: ignore
    def begin_stop_packet_capture(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        parameters, # type: "_models.VpnPacketCaptureStopParameters"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[str]
        """Stops packet capture on virtual network gateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Virtual network gateway packet capture parameters supplied to stop packet
        capture on gateway.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnPacketCaptureStopParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either str or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[str]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[str]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request; the identity lambda
            # keeps the raw pipeline response for the poller to consume.
            raw_result = self._stop_packet_capture_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; don't forward them
        # to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response of the completed operation.
            deserialized = self._deserialize('str', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture'} # type: ignore
    def _get_vpnclient_connection_health_initial(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["_models.VpnClientConnectionHealthDetailListResult"]
        """Send the initial getVpnClientConnectionHealth POST of the long-running operation.

        This operation takes no request body.

        :param resource_group_name: The name of the resource group.
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :return: The deserialized VpnClientConnectionHealthDetailListResult on 200,
         or ``None`` on 202 (operation accepted but not yet finished).
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnClientConnectionHealthDetailListResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied mappings extend/override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        # Construct URL
        url = self._get_vpnclient_connection_health_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # POST with no body, hence no Content-Type header or body kwargs.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        # Only a 200 response carries a deserializable body; 202 yields None.
        if response.status_code == 200:
            deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_vpnclient_connection_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getVpnClientConnectionHealth'} # type: ignore
    def begin_get_vpnclient_connection_health(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.VpnClientConnectionHealthDetailListResult"]
        """Get VPN client connection health detail per P2S client connection of the virtual network
        gateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VpnClientConnectionHealthDetailListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VpnClientConnectionHealthDetailListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnClientConnectionHealthDetailListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request; the identity lambda
            # keeps the raw pipeline response for the poller to consume.
            raw_result = self._get_vpnclient_connection_health_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; don't forward them
        # to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response of the completed operation.
            deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_vpnclient_connection_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getVpnClientConnectionHealth'} # type: ignore
def _disconnect_virtual_network_gateway_vpn_connections_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
request, # type: "_models.P2SVpnConnectionRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._disconnect_virtual_network_gateway_vpn_connections_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'P2SVpnConnectionRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_disconnect_virtual_network_gateway_vpn_connections_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/disconnectVirtualNetworkGatewayVpnConnections'} # type: ignore
    def begin_disconnect_virtual_network_gateway_vpn_connections(
        self,
        resource_group_name, # type: str
        virtual_network_gateway_name, # type: str
        request, # type: "_models.P2SVpnConnectionRequest"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Disconnect vpn connections of virtual network gateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :param request: The parameters are supplied to disconnect vpn connections.
        :type request: ~azure.mgmt.network.v2020_07_01.models.P2SVpnConnectionRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
        False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request; the identity lambda
            # keeps the raw pipeline response for the poller to consume.
            raw_result = self._disconnect_virtual_network_gateway_vpn_connections_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                request=request,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; don't forward them
        # to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # The operation has no response body: return None (implicitly)
            # unless the caller provided a custom ``cls`` callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_disconnect_virtual_network_gateway_vpn_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/disconnectVirtualNetworkGatewayVpnConnections'} # type: ignore
|
unknown
|
codeparrot/codeparrot-clean
| ||
global:
metric_name_validation_scheme: legacy
metric_name_escaping_scheme: dots
scrape_configs:
- job_name: prometheus
|
unknown
|
github
|
https://github.com/prometheus/prometheus
|
config/testdata/scrape_config_global_validation_mode.yml
|
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle
import os
import sys
class PersistentMixin:
    """Mixin class which provides facilities for persisting and restoring."""

    @classmethod
    def AssertWritable(cls, filename):
        """Raises an IOError if filename is not writable."""
        persist_dir = os.path.dirname(os.path.abspath(filename))
        if not os.path.exists(persist_dir):
            raise IOError('Directory does not exist: %s' % persist_dir)
        if os.path.exists(filename):
            if not os.access(filename, os.W_OK):
                raise IOError('Need write permission on file: %s' % filename)
        elif not os.access(persist_dir, os.W_OK):
            raise IOError('Need write permission on directory: %s' % persist_dir)

    @classmethod
    def Load(cls, filename):
        """Load an instance from filename."""
        # Close the file deterministically instead of leaking the handle
        # until garbage collection (the original never closed it).
        with open(filename, 'rb') as f:
            return cPickle.load(f)

    def Persist(self, filename):
        """Persist all state to filename."""
        # Read the current interval BEFORE the try block; otherwise, if
        # getcheckinterval() raised, the finally clause would reference an
        # unbound name and mask the real error.
        original_checkinterval = sys.getcheckinterval()
        try:
            # Lock out other threads so nothing can modify |self| during
            # pickling.
            sys.setcheckinterval(2**31 - 1)
            pickled_self = cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL)
        finally:
            sys.setcheckinterval(original_checkinterval)
        with open(filename, 'wb') as f:
            f.write(pickled_self)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* jcdiffct.c
*
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1994-1997, Thomas G. Lane.
* Lossless JPEG Modifications:
* Copyright (C) 1999, Ken Murchison.
* libjpeg-turbo Modifications:
* Copyright (C) 2022, 2024, D. R. Commander.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
* This file contains the difference buffer controller for compression.
* This controller is the top level of the lossless JPEG compressor proper.
* The difference buffer lies between the prediction/differencing and entropy
* encoding steps.
*/
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jlossls.h" /* Private declarations for lossless codec */
#ifdef C_LOSSLESS_SUPPORTED
/* We use a full-image sample buffer when doing Huffman optimization,
* and also for writing multiple-scan JPEG files. In all cases, the
* full-image buffer is filled during the first pass, and the scaling,
* prediction and differencing steps are run during subsequent passes.
*/
#ifdef ENTROPY_OPT_SUPPORTED
#define FULL_SAMP_BUFFER_SUPPORTED
#else
#ifdef C_MULTISCAN_FILES_SUPPORTED
#define FULL_SAMP_BUFFER_SUPPORTED
#endif
#endif
/* Private buffer controller object.
 * Tracks progress through the image (iMCU row / MCU counters), holds the
 * per-component current/previous sample rows used by the predictor, the
 * iMCU row of computed differences, and (multi-pass only) one virtual
 * sample array per component.
 */
typedef struct {
  struct jpeg_c_coef_controller pub; /* public fields */
  JDIMENSION iMCU_row_num; /* iMCU row # within image */
  JDIMENSION mcu_ctr; /* counts MCUs processed in current row */
  int MCU_vert_offset; /* counts MCU rows within iMCU row */
  int MCU_rows_per_iMCU_row; /* number of such rows needed */
  _JSAMPROW cur_row[MAX_COMPONENTS]; /* row of point-transformed samples */
  _JSAMPROW prev_row[MAX_COMPONENTS]; /* previous row of Pt'd samples */
  JDIFFARRAY diff_buf[MAX_COMPONENTS]; /* iMCU row of differences */
  /* In multi-pass modes, we need a virtual sample array for each component. */
  jvirt_sarray_ptr whole_image[MAX_COMPONENTS];
} my_diff_controller;
typedef my_diff_controller *my_diff_ptr;
/* Forward declarations */
METHODDEF(boolean) compress_data(j_compress_ptr cinfo, _JSAMPIMAGE input_buf);
#ifdef FULL_SAMP_BUFFER_SUPPORTED
METHODDEF(boolean) compress_first_pass(j_compress_ptr cinfo,
_JSAMPIMAGE input_buf);
METHODDEF(boolean) compress_output(j_compress_ptr cinfo,
_JSAMPIMAGE input_buf);
#endif
LOCAL(void)
start_iMCU_row(j_compress_ptr cinfo)
/* Reset within-iMCU-row counters for a new row */
{
  my_diff_ptr diff = (my_diff_ptr)cinfo->coef;
  int mcu_rows;

  /* An interleaved scan packs a whole iMCU row into a single MCU row.
   * A noninterleaved scan needs v_samp_factor MCU rows per iMCU row,
   * except at the bottom of the image, where only the leftover rows
   * (last_row_height) remain to be processed.
   */
  if (cinfo->comps_in_scan > 1)
    mcu_rows = 1;
  else if (diff->iMCU_row_num < cinfo->total_iMCU_rows - 1)
    mcu_rows = cinfo->cur_comp_info[0]->v_samp_factor;
  else
    mcu_rows = cinfo->cur_comp_info[0]->last_row_height;

  diff->MCU_rows_per_iMCU_row = mcu_rows;
  diff->mcu_ctr = 0;
  diff->MCU_vert_offset = 0;
}
/*
 * Initialize for a processing pass.
 */
METHODDEF(void)
start_pass_diff(j_compress_ptr cinfo, J_BUF_MODE pass_mode)
{
  my_diff_ptr diff = (my_diff_ptr)cinfo->coef;
  /* Because it is hitching a ride on the jpeg_forward_dct struct,
   * start_pass_lossless() will be called at the start of the initial pass.
   * This ensures that it will be called at the start of the Huffman
   * optimization and output passes as well.
   */
  if (pass_mode == JBUF_CRANK_DEST)
    (*cinfo->fdct->start_pass) (cinfo);
  diff->iMCU_row_num = 0;
  start_iMCU_row(cinfo);
  /* Select the _compress_data method for this pass, and sanity-check that
   * the presence/absence of the full-image buffer matches the buffer mode.
   */
  switch (pass_mode) {
  case JBUF_PASS_THRU:
    /* Single-pass: no full-image buffer may exist. */
    if (diff->whole_image[0] != NULL)
      ERREXIT(cinfo, JERR_BAD_BUFFER_MODE);
    diff->pub._compress_data = compress_data;
    break;
#ifdef FULL_SAMP_BUFFER_SUPPORTED
  case JBUF_SAVE_AND_PASS:
    /* First pass of multi-pass: save into the full-image buffer and emit. */
    if (diff->whole_image[0] == NULL)
      ERREXIT(cinfo, JERR_BAD_BUFFER_MODE);
    diff->pub._compress_data = compress_first_pass;
    break;
  case JBUF_CRANK_DEST:
    /* Later passes: re-emit from the full-image buffer only. */
    if (diff->whole_image[0] == NULL)
      ERREXIT(cinfo, JERR_BAD_BUFFER_MODE);
    diff->pub._compress_data = compress_output;
    break;
#endif
  default:
    ERREXIT(cinfo, JERR_BAD_BUFFER_MODE);
    break;
  }
}
/* Exchange two sample-row pointers (used to roll cur_row into prev_row
 * after each scanline is differenced).
 */
#define SWAP_ROWS(rowa, rowb) { \
  _JSAMPROW temp = rowa; \
  rowa = rowb; rowb = temp; \
}
/*
 * Process some data in the single-pass case.
 * We process the equivalent of one fully interleaved MCU row ("iMCU" row)
 * per call, ie, v_samp_factor rows for each component in the image.
 * Returns TRUE if the iMCU row is completed, FALSE if suspended.
 *
 * NB: input_buf contains a plane for each component in image,
 * which we index according to the component's SOF position.
 */
METHODDEF(boolean)
compress_data(j_compress_ptr cinfo, _JSAMPIMAGE input_buf)
{
  my_diff_ptr diff = (my_diff_ptr)cinfo->coef;
  lossless_comp_ptr losslessc = (lossless_comp_ptr)cinfo->fdct;
  JDIMENSION MCU_col_num; /* index of current MCU within row */
  JDIMENSION MCU_count; /* number of MCUs encoded */
  JDIMENSION last_iMCU_row = cinfo->total_iMCU_rows - 1;
  int ci, compi, yoffset, samp_row, samp_rows, samps_across;
  jpeg_component_info *compptr;
  /* Loop to write as much as one whole iMCU row */
  for (yoffset = diff->MCU_vert_offset; yoffset < diff->MCU_rows_per_iMCU_row;
       yoffset++) {
    MCU_col_num = diff->mcu_ctr;
    /* Scale and predict each scanline of the MCU row separately.
     *
     * Note: We only do this if we are at the start of an MCU row, ie,
     * we don't want to reprocess a row suspended by the output.
     */
    if (MCU_col_num == 0) {
      for (ci = 0; ci < cinfo->comps_in_scan; ci++) {
        compptr = cinfo->cur_comp_info[ci];
        compi = compptr->component_index;
        if (diff->iMCU_row_num < last_iMCU_row)
          samp_rows = compptr->v_samp_factor;
        else {
          /* NB: can't use last_row_height here, since may not be set! */
          samp_rows =
            (int)(compptr->height_in_blocks % compptr->v_samp_factor);
          if (samp_rows == 0) samp_rows = compptr->v_samp_factor;
          else {
            /* Fill dummy difference rows at the bottom edge with zeros, which
             * will encode to the smallest amount of data.
             */
            for (samp_row = samp_rows; samp_row < compptr->v_samp_factor;
                 samp_row++)
              memset(diff->diff_buf[compi][samp_row], 0,
                     jround_up((long)compptr->width_in_blocks,
                               (long)compptr->h_samp_factor) * sizeof(JDIFF));
          }
        }
        samps_across = compptr->width_in_blocks;
        for (samp_row = 0; samp_row < samp_rows; samp_row++) {
          (*losslessc->scaler_scale) (cinfo,
                                      input_buf[compi][samp_row],
                                      diff->cur_row[compi],
                                      samps_across);
          (*losslessc->predict_difference[compi])
            (cinfo, compi, diff->cur_row[compi], diff->prev_row[compi],
             diff->diff_buf[compi][samp_row], samps_across);
          SWAP_ROWS(diff->cur_row[compi], diff->prev_row[compi]);
        }
      }
    }
    /* Try to write the MCU row (or remaining portion of suspended MCU row). */
    MCU_count =
      (*cinfo->entropy->encode_mcus) (cinfo,
                                      diff->diff_buf, yoffset, MCU_col_num,
                                      cinfo->MCUs_per_row - MCU_col_num);
    if (MCU_count != cinfo->MCUs_per_row - MCU_col_num) {
      /* Suspension forced; update state counters and exit.
       * BUGFIX: advance mcu_ctr by the number of MCUs actually encoded in
       * this call (MCU_count), not by MCU_col_num -- the latter doubled the
       * column counter and discarded this call's progress, so a resumed
       * call would re-encode or skip MCUs.
       */
      diff->MCU_vert_offset = yoffset;
      diff->mcu_ctr += MCU_count;
      return FALSE;
    }
    /* Completed an MCU row, but perhaps not an iMCU row */
    diff->mcu_ctr = 0;
  }
  /* Completed the iMCU row, advance counters for next one */
  diff->iMCU_row_num++;
  start_iMCU_row(cinfo);
  return TRUE;
}
#ifdef FULL_SAMP_BUFFER_SUPPORTED
/*
* Process some data in the first pass of a multi-pass case.
* We process the equivalent of one fully interleaved MCU row ("iMCU" row)
* per call, ie, v_samp_factor rows for each component in the image.
* This amount of data is read from the source buffer and saved into the
* virtual arrays.
*
* We must also emit the data to the compressor. This is conveniently
* done by calling compress_output() after we've loaded the current strip
* of the virtual arrays.
*
* NB: input_buf contains a plane for each component in image. All components
* are loaded into the virtual arrays in this pass. However, it may be that
* only a subset of the components are emitted to the compressor during
* this first pass; be careful about looking at the scan-dependent variables
* (MCU dimensions, etc).
*/
METHODDEF(boolean)
compress_first_pass(j_compress_ptr cinfo, _JSAMPIMAGE input_buf)
{
  my_diff_ptr diff = (my_diff_ptr)cinfo->coef;
  JDIMENSION last_iMCU_row = cinfo->total_iMCU_rows - 1;
  JDIMENSION samps_across;
  int ci, samp_row, samp_rows;
  _JSAMPARRAY buffer;
  jpeg_component_info *compptr;

  /* Save one iMCU row's worth of samples for every component (not just the
   * components of the current scan) into the virtual arrays.
   */
  for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components;
       ci++, compptr++) {
    /* Align the virtual buffer for this component. */
    buffer = (_JSAMPARRAY)(*cinfo->mem->access_virt_sarray)
      ((j_common_ptr)cinfo, diff->whole_image[ci],
       diff->iMCU_row_num * compptr->v_samp_factor,
       (JDIMENSION)compptr->v_samp_factor, TRUE);
    /* Count non-dummy sample rows in this iMCU row. */
    if (diff->iMCU_row_num < last_iMCU_row)
      samp_rows = compptr->v_samp_factor;
    else {
      /* NB: can't use last_row_height here, since may not be set! */
      samp_rows = (int)(compptr->height_in_blocks % compptr->v_samp_factor);
      if (samp_rows == 0) samp_rows = compptr->v_samp_factor;
    }
    samps_across = compptr->width_in_blocks;
    /* Copy the non-dummy sample rows into the virtual array so they can be
     * re-read on later passes.  No point-transform scaling or differencing
     * happens here; that work is done when the rows are fed to the
     * compressor via compress_output() below.
     */
    for (samp_row = 0; samp_row < samp_rows; samp_row++) {
      memcpy(buffer[samp_row], input_buf[ci][samp_row],
             samps_across * sizeof(_JSAMPLE));
    }
  }
  /* NB: compress_output will increment iMCU_row_num if successful.
   * A suspension return will result in redoing all the work above next time.
   */

  /* Emit data to the compressor, sharing code with subsequent passes */
  return compress_output(cinfo, input_buf);
}
/*
* Process some data in subsequent passes of a multi-pass case.
* We process the equivalent of one fully interleaved MCU row ("iMCU" row)
* per call, ie, v_samp_factor rows for each component in the scan.
* The data is obtained from the virtual arrays and fed to the compressor.
* Returns TRUE if the iMCU row is completed, FALSE if suspended.
*
* NB: input_buf is ignored; it is likely to be a NULL pointer.
*/
METHODDEF(boolean)
compress_output(j_compress_ptr cinfo, _JSAMPIMAGE input_buf)
{
  my_diff_ptr diff = (my_diff_ptr)cinfo->coef;
  int ci, compi;
  _JSAMPARRAY buffer[MAX_COMPS_IN_SCAN];
  jpeg_component_info *compptr;

  /* Align the virtual buffers for the components used in this scan.
   * NB: during first pass, this is safe only because the buffers will
   * already be aligned properly, so jmemmgr.c won't need to do any I/O.
   */
  for (ci = 0; ci < cinfo->comps_in_scan; ci++) {
    compptr = cinfo->cur_comp_info[ci];
    compi = compptr->component_index;
    /* Final FALSE argument: rows were written during the first pass and are
     * only read back here (read-only access).
     */
    buffer[compi] = (_JSAMPARRAY)(*cinfo->mem->access_virt_sarray)
      ((j_common_ptr)cinfo, diff->whole_image[compi],
       diff->iMCU_row_num * compptr->v_samp_factor,
       (JDIMENSION)compptr->v_samp_factor, FALSE);
  }

  /* Hand the aligned row buffers to the single-pass compression path. */
  return compress_data(cinfo, buffer);
}
#endif /* FULL_SAMP_BUFFER_SUPPORTED */
/*
* Initialize difference buffer controller.
*/
GLOBAL(void)
_jinit_c_diff_controller(j_compress_ptr cinfo, boolean need_full_buffer)
{
  my_diff_ptr diff;
  int ci, row;
  jpeg_component_info *compptr;

  /* Validate the requested sample precision against what this build of the
   * library can represent in a JSAMPLE.
   */
#if BITS_IN_JSAMPLE == 8
  if (cinfo->data_precision > BITS_IN_JSAMPLE || cinfo->data_precision < 2)
#else
  if (cinfo->data_precision > BITS_IN_JSAMPLE ||
      cinfo->data_precision < BITS_IN_JSAMPLE - 3)
#endif
    ERREXIT1(cinfo, JERR_BAD_PRECISION, cinfo->data_precision);

  diff = (my_diff_ptr)
    (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
                                sizeof(my_diff_controller));
  /* The difference controller is installed in the slot normally used for
   * the DCT coefficient controller (see the my_diff_ptr casts above).
   */
  cinfo->coef = (struct jpeg_c_coef_controller *)diff;
  diff->pub.start_pass = start_pass_diff;

  /* Create the prediction row buffers: one current and one previous row per
   * component, each padded up to a multiple of h_samp_factor.
   */
  for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components;
       ci++, compptr++) {
    diff->cur_row[ci] = *(_JSAMPARRAY)(*cinfo->mem->alloc_sarray)
      ((j_common_ptr)cinfo, JPOOL_IMAGE,
       (JDIMENSION)jround_up((long)compptr->width_in_blocks,
                             (long)compptr->h_samp_factor),
       (JDIMENSION)1);
    diff->prev_row[ci] = *(_JSAMPARRAY)(*cinfo->mem->alloc_sarray)
      ((j_common_ptr)cinfo, JPOOL_IMAGE,
       (JDIMENSION)jround_up((long)compptr->width_in_blocks,
                             (long)compptr->h_samp_factor),
       (JDIMENSION)1);
  }

  /* Create the difference buffer. */
  for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components;
       ci++, compptr++) {
    diff->diff_buf[ci] =
      ALLOC_DARRAY(JPOOL_IMAGE,
                   (JDIMENSION)jround_up((long)compptr->width_in_blocks,
                                         (long)compptr->h_samp_factor),
                   (JDIMENSION)compptr->v_samp_factor);
    /* Prefill difference rows with zeros. We do this because only actual
     * data is placed in the buffers during prediction/differencing, leaving
     * any dummy differences at the right edge as zeros, which will encode
     * to the smallest amount of data.
     */
    for (row = 0; row < compptr->v_samp_factor; row++)
      memset(diff->diff_buf[ci][row], 0,
             jround_up((long)compptr->width_in_blocks,
                       (long)compptr->h_samp_factor) * sizeof(JDIFF));
  }

  /* Create the sample buffer. */
  if (need_full_buffer) {
#ifdef FULL_SAMP_BUFFER_SUPPORTED
    /* Allocate a full-image virtual array for each component, */
    /* padded to a multiple of samp_factor differences in each direction. */
    for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components;
         ci++, compptr++) {
      diff->whole_image[ci] = (*cinfo->mem->request_virt_sarray)
        ((j_common_ptr)cinfo, JPOOL_IMAGE, FALSE,
         (JDIMENSION)jround_up((long)compptr->width_in_blocks,
                               (long)compptr->h_samp_factor),
         (JDIMENSION)jround_up((long)compptr->height_in_blocks,
                               (long)compptr->v_samp_factor),
         (JDIMENSION)compptr->v_samp_factor);
    }
#else
    ERREXIT(cinfo, JERR_BAD_BUFFER_MODE);
#endif
  } else
    diff->whole_image[0] = NULL; /* flag for no virtual arrays */
}
#endif /* C_LOSSLESS_SUPPORTED */
|
c
|
github
|
https://github.com/opencv/opencv
|
3rdparty/libjpeg-turbo/src/jcdiffct.c
|
// MARK: - Sessions
extension SessionData {
    /// Deprecated shim kept for source compatibility: forwards the old
    /// unlabeled dictionary initializer to the current designated form,
    /// `SessionData.init(initialData:)`.
    @available(*, deprecated, message: "use SessionData.init(initialData:)")
    public init(_ data: [String: String]) { self.init(initialData: data) }
}
|
swift
|
github
|
https://github.com/vapor/vapor
|
Sources/Vapor/_Deprecations.swift
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_admin
short_description: Add or modify PAN-OS user accounts password.
description:
- PanOS module that allows changes to the user account passwords by doing
API calls to the Firewall using pan-api as the protocol.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
admin_username:
description:
- username for admin user
required: false
default: "admin"
admin_password:
description:
- password for admin user
required: true
role:
description:
- role for admin user
required: false
default: null
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
# Set the password of user admin to "badpassword"
# Doesn't commit the candidate config
- name: set admin password
panos_admin:
ip_address: "192.168.1.1"
password: "admin"
admin_username: admin
admin_password: "badpassword"
commit: False
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "okey dokey"
'''
from ansible.module_utils.basic import AnsibleModule
# pan-python is an optional third-party dependency providing the PAN-OS XML
# API client.  Record its availability so main() can fail with a clear
# message instead of an ImportError traceback at module import time.
try:
    import pan.xapi
    HAS_LIB = True
except ImportError:
    HAS_LIB = False
# XPath template addressing a single management-plane user entry by name.
_ADMIN_XPATH = "/config/mgt-config/users/entry[@name='%s']"


def admin_exists(xapi, admin_username):
    """Look up *admin_username* in the firewall's management config.

    Issues a config ``get`` for the user's XPath and returns the first
    ``<entry>`` element from the response tree, or None if the response
    contains no such element.
    """
    xapi.get(_ADMIN_XPATH % admin_username)
    return xapi.element_root.find('.//entry')
def admin_set(xapi, module, admin_username, admin_password, role):
    """Create or update a PAN-OS administrator account.

    :param xapi: connected pan.xapi.PanXapi instance
    :param module: AnsibleModule (unused here; kept for interface stability)
    :param admin_username: name of the admin account to create or update
    :param admin_password: new cleartext password, or None to leave unchanged
    :param role: role name (e.g. "superuser"), or None to leave unchanged
    :returns: True if the candidate configuration was changed, else False
    """
    phash = None
    if admin_password is not None:
        # PAN-OS stores only a hash; ask the firewall itself to hash the
        # cleartext password.
        xapi.op(cmd='request password-hash password "%s"' % admin_password,
                cmd_xml=True)
        r = xapi.element_root
        phash = r.find('.//phash').text
    rbval = None
    if role is not None:
        # Built-in superuser/superreader roles take "yes" as the element
        # text; other (custom) role profiles take an empty value.
        rbval = "yes"
        if role != "superuser" and role != 'superreader':
            rbval = ""
    ea = admin_exists(xapi, admin_username)
    if ea is not None:
        # User already exists: change only what was requested.
        changed = False
        if role is not None:
            rb = ea.find('.//role-based')
            if rb is not None:
                if rb[0].tag != role:
                    changed = True
                    # Remove the old role element, then set the new one.
                    xpath = _ADMIN_XPATH % admin_username
                    xpath += '/permissions/role-based/%s' % rb[0].tag
                    xapi.delete(xpath=xpath)
                    xpath = _ADMIN_XPATH % admin_username
                    xpath += '/permissions/role-based'
                    xapi.set(xpath=xpath,
                             element='<%s>%s</%s>' % (role, rbval, role))
        if admin_password is not None:
            xapi.edit(xpath=_ADMIN_XPATH % admin_username + '/phash',
                      element='<phash>%s</phash>' % phash)
            changed = True
        return changed
    # User does not exist yet: build the new entry.  Guard each fragment so
    # that omitting the password or role does not reference an unset value
    # (the original code raised NameError when role or admin_password was
    # None on this path).
    exml = []
    if phash is not None:
        exml.append('<phash>%s</phash>' % phash)
    if role is not None:
        exml.append('<permissions><role-based><%s>%s</%s>'
                    '</role-based></permissions>' % (role, rbval, role))
    xapi.set(xpath=_ADMIN_XPATH % admin_username, element=''.join(exml))
    return True
def main():
    """Ansible entry point: validate parameters, apply the admin account
    change via the PAN-OS XML API, and optionally commit the candidate
    configuration."""
    module = AnsibleModule(
        argument_spec=dict(
            ip_address=dict(),
            password=dict(no_log=True),
            username=dict(default='admin'),
            admin_username=dict(default='admin'),
            admin_password=dict(no_log=True),
            role=dict(),
            commit=dict(type='bool', default=True)
        ),
        supports_check_mode=False,
    )
    if not HAS_LIB:
        module.fail_json(msg='pan-python required for this module')

    params = module.params
    ip_address = params["ip_address"]
    if not ip_address:
        module.fail_json(msg="ip_address should be specified")
    password = params["password"]
    if not password:
        module.fail_json(msg="password is required")

    # Credentials used to talk to the firewall's API (distinct from the
    # admin account being managed).
    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=params['username'],
        api_password=password
    )

    admin_username = params['admin_username']
    if admin_username is None:
        module.fail_json(msg="admin_username is required")

    changed = admin_set(xapi, module, admin_username,
                        params['admin_password'], params['role'])
    if changed and params['commit']:
        xapi.commit(cmd="<commit></commit>", sync=True, interval=1)

    module.exit_json(changed=changed, msg="okey dokey")


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*! Select2 4.0.13 | https://github.com/select2/select2/blob/master/LICENSE.md */
// Albanian (sq) translation bundle for Select2, registered through the AMD
// loader the Select2 jQuery plugin exposes (jQuery.fn.select2.amd).
// Vendored, minified upstream asset — do not edit by hand.
!function(){if(jQuery&&jQuery.fn&&jQuery.fn.select2&&jQuery.fn.select2.amd)var e=jQuery.fn.select2.amd;e.define("select2/i18n/sq",[],function(){return{errorLoading:function(){return"Rezultatet nuk mund të ngarkoheshin."},inputTooLong:function(e){var n=e.input.length-e.maximum,t="Të lutem fshi "+n+" karakter";return 1!=n&&(t+="e"),t},inputTooShort:function(e){return"Të lutem shkruaj "+(e.minimum-e.input.length)+" ose më shumë karaktere"},loadingMore:function(){return"Duke ngarkuar më shumë rezultate…"},maximumSelected:function(e){var n="Mund të zgjedhësh vetëm "+e.maximum+" element";return 1!=e.maximum&&(n+="e"),n},noResults:function(){return"Nuk u gjet asnjë rezultat"},searching:function(){return"Duke kërkuar…"},removeAllItems:function(){return"Hiq të gjitha sendet"}}}),e.define,e.require}();
|
javascript
|
github
|
https://github.com/django/django
|
django/contrib/admin/static/admin/js/vendor/select2/i18n/sq.js
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSLATE_MLIR_ROUNDTRIP_FLAGS_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSLATE_MLIR_ROUNDTRIP_FLAGS_H_
// Transitional forwarding header.
#include "tensorflow/compiler/mlir/tf2xla/api/v2/mlir_roundtrip_flags.h"
#endif // TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSLATE_MLIR_ROUNDTRIP_FLAGS_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h
|
#! python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# (C) 2001-2009 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
# compatibility folder Python < 2.6
# Compatibility shim for Python < 2.6: provide `bytes` and `bytearray` names
# that behave like the 2.6 versions (i.e. like str) on older interpreters.
try:
    bytes
    bytearray
except (NameError, AttributeError):
    # Python older than 2.6 do not have these types. Like for Python 2.6 they
    # should behave like str. For Python older than 3.0 we want to work with
    # strings anyway, only later versions have a true bytes type.
    bytes = str
    # bytearray is a mutable type that is easily turned into an instance of
    # bytes
    class bytearray(list):
        # for bytes(bytearray()) usage
        def __str__(self): return ''.join(self)
        # append automatically converts integers to characters
        def append(self, item):
            if isinstance(item, str):
                list.append(self, item)
            else:
                list.append(self, chr(item))
        # += : extend element-wise so int/str conversion in append() applies
        def __iadd__(self, other):
            for byte in other:
                self.append(byte)
            return self
# all Python versions prior 3.x convert str([17]) to '[17]' instead of '\x11'
# so a simple bytes(sequence) doesn't work for all versions
def to_bytes(seq):
    """convert a sequence to a bytes type"""
    # Build up a (possibly shimmed) bytearray element by element: its
    # append() accepts ints, and on the pre-2.6 fallback class also
    # single-character strings.
    buf = bytearray()
    for element in seq:
        buf.append(element)
    return bytes(buf)
# create control bytes
XON = to_bytes([17])   # DC1: resume transmission (software flow control)
XOFF = to_bytes([19])  # DC3: pause transmission (software flow control)

# Parity, stop-bit and byte-size constants used throughout pySerial.
PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S'
STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO = (1, 1.5, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)

# Human-readable names for the parity constants (e.g. for UIs and repr()).
PARITY_NAMES = {
    PARITY_NONE: 'None',
    PARITY_EVEN: 'Even',
    PARITY_ODD: 'Odd',
    PARITY_MARK: 'Mark',
    PARITY_SPACE: 'Space',
}
class SerialException(IOError):
    """Base class for serial port related exceptions."""


class SerialTimeoutException(SerialException):
    """Write timeouts give an exception"""


# Pre-constructed exception objects for common failure cases; presumably
# raised by the platform-specific port implementations (not visible in this
# file) — NOTE(review): confirm usage in the backends.
writeTimeoutError = SerialTimeoutException("Write timeout")
portNotOpenError = ValueError('Attempting to use a port that is not open')
class FileLike(object):
    """An abstract file like class.
    This class implements readline and readlines based on read and
    writelines based on write.
    This class is used to provide the above functions for Serial
    port objects.
    Note that when the serial port was opened with _NO_ timeout that
    readline blocks until it sees a newline (or the specified size is
    reached) and that readlines would never return and therefore
    refuses to work (it raises an exception in this case)!
    """

    def __init__(self):
        self.closed = True

    def close(self):
        self.closed = True

    # so that ports are closed when objects are discarded
    def __del__(self):
        """Destructor. Calls close()."""
        # The try/except block is in case this is called at program
        # exit time, when it's possible that globals have already been
        # deleted, and then the close() call might fail.  Since
        # there's nothing we can do about such failures and they annoy
        # the end users, we suppress the traceback.
        try:
            self.close()
        except:
            pass

    def writelines(self, sequence):
        # write() is supplied by the concrete subclass.
        for line in sequence:
            self.write(line)

    def flush(self):
        """flush of file like objects"""
        pass

    # iterator for e.g. "for line in Serial(0): ..." usage
    # NOTE: `next` (not __next__) is the Python 2 iterator protocol.
    def next(self):
        line = self.readline()
        if not line: raise StopIteration
        return line

    def __iter__(self):
        return self

    # other functions of file-likes - not used by pySerial
    #~ readinto(b)
    def seek(self, pos, whence=0):
        raise IOError("file is not seekable")

    def tell(self):
        raise IOError("file is not seekable")

    def truncate(self, n=None):
        raise IOError("file is not seekable")

    def isatty(self):
        return False
class SerialBase(object):
    """Serial port base class. Provides __init__ function and properties to
    get/set port settings.

    Concrete platform backends (not visible in this file) are expected to
    supply open(), close(), read(), makeDeviceName() and _reconfigurePort().
    """

    # default values, may be overridden in subclasses that do not support all values
    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
                 9600, 19200, 38400, 57600, 115200, 230400, 460800, 500000,
                 576000, 921600, 1000000, 1152000, 1500000, 2000000, 2500000,
                 3000000, 3500000, 4000000)
    BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
    PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE)
    STOPBITS = (STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO)

    def __init__(self,
                 port = None,           # number of device, numbering starts at
                                        # zero. if everything fails, the user
                                        # can specify a device string, note
                                        # that this isn't portable anymore
                                        # port will be opened if one is specified
                 baudrate=9600,         # baud rate
                 bytesize=EIGHTBITS,    # number of data bits
                 parity=PARITY_NONE,    # enable parity checking
                 stopbits=STOPBITS_ONE, # number of stop bits
                 timeout=None,          # set a timeout value, None to wait forever
                 xonxoff=0,             # enable software flow control
                 rtscts=0,              # enable RTS/CTS flow control
                 writeTimeout=None,     # set a timeout for writes
                 dsrdtr=None,           # None: use rtscts setting, dsrdtr override if true or false
                 interCharTimeout=None  # Inter-character timeout, None to disable
                 ):
        """Initialize comm port object. If a port is given, then the port will be
        opened immediately. Otherwise a Serial port object in closed state
        is returned."""
        self._isOpen = False
        self._port = None               # correct value is assigned below through properties
        self._baudrate = None           # correct value is assigned below through properties
        self._bytesize = None           # correct value is assigned below through properties
        self._parity = None             # correct value is assigned below through properties
        self._stopbits = None           # correct value is assigned below through properties
        self._timeout = None            # correct value is assigned below through properties
        self._writeTimeout = None       # correct value is assigned below through properties
        self._xonxoff = None            # correct value is assigned below through properties
        self._rtscts = None             # correct value is assigned below through properties
        self._dsrdtr = None             # correct value is assigned below through properties
        self._interCharTimeout = None   # correct value is assigned below through properties
        # assign values using get/set methods using the properties feature
        # (each setter validates its input and reconfigures an open port)
        self.port = port
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.parity = parity
        self.stopbits = stopbits
        self.timeout = timeout
        self.writeTimeout = writeTimeout
        self.xonxoff = xonxoff
        self.rtscts = rtscts
        self.dsrdtr = dsrdtr
        self.interCharTimeout = interCharTimeout
        if port is not None:
            self.open()

    def isOpen(self):
        """Check if the port is opened."""
        return self._isOpen

    # - - - - - - - - - - - - - - - - - - - - - - - -

    # TODO: these are not really needed as the is the BAUDRATES etc. attribute...
    # maybe i remove them before the final release...
    def getSupportedBaudrates(self):
        return [(str(b), b) for b in self.BAUDRATES]

    def getSupportedByteSizes(self):
        return [(str(b), b) for b in self.BYTESIZES]

    def getSupportedStopbits(self):
        return [(str(b), b) for b in self.STOPBITS]

    def getSupportedParities(self):
        return [(PARITY_NAMES[b], b) for b in self.PARITIES]

    # - - - - - - - - - - - - - - - - - - - - - - - -

    def setPort(self, port):
        """Change the port. The attribute portstr is set to a string that
        contains the name of the port."""
        # Close/reopen around the change so an open port picks up the new
        # device.
        was_open = self._isOpen
        if was_open: self.close()
        if port is not None:
            if isinstance(port, basestring):    # NOTE: basestring is Python 2 only
                self.portstr = port
            else:
                self.portstr = self.makeDeviceName(port)
        else:
            self.portstr = None
        self._port = port
        self.name = self.portstr
        if was_open: self.open()

    def getPort(self):
        """Get the current port setting. The value that was passed on init or using
        setPort() is passed back. See also the attribute portstr which contains
        the name of the port as a string."""
        return self._port

    port = property(getPort, setPort, doc="Port setting")

    def setBaudrate(self, baudrate):
        """Change baud rate. It raises a ValueError if the port is open and the
        baud rate is not possible. If the port is closed, then the value is
        accepted and the exception is raised when the port is opened."""
        try:
            self._baudrate = int(baudrate)
        except TypeError:
            raise ValueError("Not a valid baudrate: %r" % (baudrate,))
        else:
            if self._isOpen: self._reconfigurePort()

    def getBaudrate(self):
        """Get the current baud rate setting."""
        return self._baudrate

    baudrate = property(getBaudrate, setBaudrate, doc="Baud rate setting")

    def setByteSize(self, bytesize):
        """Change byte size."""
        if bytesize not in self.BYTESIZES: raise ValueError("Not a valid byte size: %r" % (bytesize,))
        self._bytesize = bytesize
        if self._isOpen: self._reconfigurePort()

    def getByteSize(self):
        """Get the current byte size setting."""
        return self._bytesize

    bytesize = property(getByteSize, setByteSize, doc="Byte size setting")

    def setParity(self, parity):
        """Change parity setting."""
        if parity not in self.PARITIES: raise ValueError("Not a valid parity: %r" % (parity,))
        self._parity = parity
        if self._isOpen: self._reconfigurePort()

    def getParity(self):
        """Get the current parity setting."""
        return self._parity

    parity = property(getParity, setParity, doc="Parity setting")

    def setStopbits(self, stopbits):
        """Change stop bits size."""
        if stopbits not in self.STOPBITS: raise ValueError("Not a valid stop bit size: %r" % (stopbits,))
        self._stopbits = stopbits
        if self._isOpen: self._reconfigurePort()

    def getStopbits(self):
        """Get the current stop bits setting."""
        return self._stopbits

    stopbits = property(getStopbits, setStopbits, doc="Stop bits setting")

    def setTimeout(self, timeout):
        """Change timeout setting."""
        if timeout is not None:
            try:
                timeout + 1     # test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % (timeout,))
            if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
        self._timeout = timeout
        if self._isOpen: self._reconfigurePort()

    def getTimeout(self):
        """Get the current timeout setting."""
        return self._timeout

    timeout = property(getTimeout, setTimeout, doc="Timeout setting for read()")

    def setWriteTimeout(self, timeout):
        """Change timeout setting."""
        if timeout is not None:
            if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
            try:
                timeout + 1     #test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % timeout)
        self._writeTimeout = timeout
        if self._isOpen: self._reconfigurePort()

    def getWriteTimeout(self):
        """Get the current timeout setting."""
        return self._writeTimeout

    writeTimeout = property(getWriteTimeout, setWriteTimeout, doc="Timeout setting for write()")

    def setXonXoff(self, xonxoff):
        """Change XON/XOFF setting."""
        self._xonxoff = xonxoff
        if self._isOpen: self._reconfigurePort()

    def getXonXoff(self):
        """Get the current XON/XOFF setting."""
        return self._xonxoff

    xonxoff = property(getXonXoff, setXonXoff, doc="XON/XOFF setting")

    def setRtsCts(self, rtscts):
        """Change RTS/CTS flow control setting."""
        self._rtscts = rtscts
        if self._isOpen: self._reconfigurePort()

    def getRtsCts(self):
        """Get the current RTS/CTS flow control setting."""
        return self._rtscts

    rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting")

    def setDsrDtr(self, dsrdtr=None):
        """Change DsrDtr flow control setting."""
        if dsrdtr is None:
            # if not set, keep backwards compatibility and follow rtscts setting
            self._dsrdtr = self._rtscts
        else:
            # if defined independently, follow its value
            self._dsrdtr = dsrdtr
        if self._isOpen: self._reconfigurePort()

    def getDsrDtr(self):
        """Get the current DSR/DTR flow control setting."""
        return self._dsrdtr

    dsrdtr = property(getDsrDtr, setDsrDtr, "DSR/DTR flow control setting")

    def setInterCharTimeout(self, interCharTimeout):
        """Change inter-character timeout setting."""
        if interCharTimeout is not None:
            if interCharTimeout < 0: raise ValueError("Not a valid timeout: %r" % interCharTimeout)
            try:
                interCharTimeout + 1    # test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % interCharTimeout)
        self._interCharTimeout = interCharTimeout
        if self._isOpen: self._reconfigurePort()

    def getInterCharTimeout(self):
        """Get the current inter-character timeout setting."""
        return self._interCharTimeout

    interCharTimeout = property(getInterCharTimeout, setInterCharTimeout, doc="Inter-character timeout setting for read()")

    # - - - - - - - - - - - - - - - - - - - - - - - -

    # Names of all user-visible settings, used by get/applySettingsDict to
    # snapshot and restore port configuration.
    _SETTINGS = ('baudrate', 'bytesize', 'parity', 'stopbits', 'xonxoff',
                 'dsrdtr', 'rtscts', 'timeout', 'writeTimeout', 'interCharTimeout')

    def getSettingsDict(self):
        """Get current port settings as a dictionary. For use with
        applySettingsDict"""
        return dict([(key, getattr(self, '_'+key)) for key in self._SETTINGS])

    def applySettingsDict(self, d):
        """apply stored settings from a dictionary returned from
        getSettingsDict. it's allowed to delete keys from the dictionary. these
        values will simply left unchanged."""
        for key in self._SETTINGS:
            if d[key] != getattr(self, '_'+key):    # check against internal "_" value
                setattr(self, key, d[key])          # set non "_" value to use properties write function

    # - - - - - - - - - - - - - - - - - - - - - - - -

    def __repr__(self):
        """String representation of the current port settings and its state."""
        return "%s<id=0x%x, open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % (
            self.__class__.__name__,
            id(self),
            self._isOpen,
            self.portstr,
            self.baudrate,
            self.bytesize,
            self.parity,
            self.stopbits,
            self.timeout,
            self.xonxoff,
            self.rtscts,
            self.dsrdtr,
        )

    # - - - - - - - - - - - - - - - - - - - - - - - -

    def readline(self, size=None, eol='\n'):
        """read a line which is terminated with end-of-line (eol) character
        ('\n' by default) or until timeout"""
        # Reads one byte at a time; relies on the subclass read() honoring
        # the timeout setting.
        line = ''
        while 1:
            c = self.read(1)
            if c:
                line += c   # not very efficient but lines are usually not that long
                if c == eol:
                    break
                if size is not None and len(line) >= size:
                    break
            else:
                # read() returned nothing -> timeout expired
                break
        return bytes(line)

    def readlines(self, sizehint=None, eol='\n'):
        """read a list of lines, until timeout
        sizehint is ignored"""
        if self.timeout is None:
            raise ValueError("Serial port MUST have enabled timeout for this function!")
        lines = []
        while 1:
            line = self.readline(eol=eol)
            if line:
                lines.append(line)
                if line[-1] != eol:     # was the line received with a timeout?
                    break
            else:
                break
        return lines

    def xreadlines(self, sizehint=None):
        """just call readlines - here for compatibility"""
        return self.readlines()

    # - - - - - - - - - - - - - - - - - - - - - - - -
    # compatibility with io library

    def readable(self): return True
    def writable(self): return True
    def seekable(self): return False
    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read."""
        data = self.read(len(b))
        n = len(data)
        try:
            b[:n] = data
        except TypeError, err:      # NOTE: Python 2-only except syntax
            import array
            if not isinstance(b, array.array):
                raise err
            # array.array slices only accept arrays, not str
            b[:n] = array.array('b', data)
        return n
if __name__ == '__main__':
    # Basic self-test: instantiate the base class (no port is opened since
    # port=None) and print the default settings and supported value lists.
    import sys
    s = SerialBase()
    sys.stdout.write('port name:  %s\n' % s.portstr)
    sys.stdout.write('baud rates: %s\n' % s.getSupportedBaudrates())
    sys.stdout.write('byte sizes: %s\n' % s.getSupportedByteSizes())
    sys.stdout.write('parities:   %s\n' % s.getSupportedParities())
    sys.stdout.write('stop bits:  %s\n' % s.getSupportedStopbits())
    sys.stdout.write('%s\n' % s)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.impl;
import java.nio.ByteBuffer;
import java.util.function.Consumer;
import java.util.function.IntFunction;
import org.apache.hadoop.io.ByteBufferPool;
import static java.util.Objects.requireNonNull;
/**
* A ByteBufferPool implementation that uses a pair of functions to allocate
* and release ByteBuffers; intended for use implementing the VectorIO API
* as it makes the pair of functions easier to pass around and use in
* existing code.
* <p>
* No matter what kind of buffer is requested, the allocation function
* is invoked; that is: the direct flag is ignored.
*/
public final class VectorIOBufferPool implements ByteBufferPool {
/** The function to allocate a buffer. */
private final IntFunction<ByteBuffer> allocate;
/** The function to release a buffer. */
private final Consumer<ByteBuffer> release;
/**
* @param allocate function to allocate ByteBuffer
* @param release callable to release a ByteBuffer.
*/
public VectorIOBufferPool(
IntFunction<ByteBuffer> allocate,
Consumer<ByteBuffer> release) {
this.allocate = requireNonNull(allocate);
this.release = requireNonNull(release);
}
/**
* Get a ByteBuffer.
* @param direct heap/direct flag. Unused.
* @param length The minimum length the buffer will have.
* @return a buffer
*/
@Override
public ByteBuffer getBuffer(final boolean direct, final int length) {
return allocate.apply(length);
}
/**
* Release a buffer.
* Unlike normal ByteBufferPool implementations
* a null buffer is accepted and ignored.
* @param buffer buffer to release; may be null.
*/
@Override
public void putBuffer(final ByteBuffer buffer) {
if (buffer != null) {
release.accept(buffer);
}
}
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/VectorIOBufferPool.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.