from __future__ import print_function

from collections import OrderedDict
import os
import sys
import timeit

import scipy.io as sio
import numpy as np

import theano
import theano.tensor as T

################################################################################################################
################################################################################################################

'''Layer Definition'''

class HiddenLayer(object):
    def __init__(self, rng, input_source, input_target, n_in, n_out, W=None, b=None,
                 activation=T.nnet.sigmoid, name=''):
        """
        Typical hidden layer of an MLP: units are fully connected and have an
        activation function. The weight matrix W is of shape (n_in, n_out) and
        the bias vector b is of shape (n_out,).

        Hidden unit activation is given by: activation(dot(input, W) + b),
        sigmoid by default.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input_source: theano.tensor.dmatrix
        :param input_source: a symbolic tensor of shape (n_examples, n_in)

        :type input_target: theano.tensor.dmatrix
        :param input_target: a symbolic tensor of shape (n_examples, n_in)

        :type n_in: int
        :param n_in: dimensionality of input

        :type n_out: int
        :param n_out: number of hidden units

        :type activation: theano.Op or function
        :param activation: non-linearity to be applied in the hidden layer
        """
        self.input_source = input_source
        self.input_target = input_target

        if W is None:
            W_values = np.asarray(
                rng.uniform(
                    low=-np.sqrt(6. / (n_in + n_out)),
                    high=np.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            if activation == T.nnet.sigmoid:
                W_values *= 4

            W = theano.shared(value=W_values, name=name+'_W', borrow=True)

        if b is None:
            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name=name+'_b', borrow=True)

        self.W = W
        self.b = b

        lin_output_source = T.dot(input_source, self.W) + self.b
        lin_output_target = T.dot(input_target, self.W) + self.b

        self.output_source = (
            lin_output_source if activation is None
            else activation(lin_output_source)
        )
        self.output_target = (
            lin_output_target if activation is None
            else activation(lin_output_target)
        )

        # parameters of the model
        self.params = [self.W, self.b]
        #self.params = [(name+'_W', self.W), (name+'_b', self.b)]


class GaussianSampleLayer(object):
    def __init__(self, mu, log_sigma, n_in, batch_size):
        '''
        This layer implements the Gaussian sampling step of Stochastic
        Gradient Variational Bayes (SGVB).

        :type mu: theano.tensor.dmatrix
        :param mu: a symbolic tensor of shape (n_batch, n_in), the sample mean (mu)

        :type log_sigma: theano.tensor.dmatrix
        :param log_sigma: a symbolic tensor of shape (n_batch, n_in), the
                          log-variance (log_sigma). A diagonal covariance is
                          used, so each row holds one example's variances.
        '''
        seed = 42
        '''
        if "gpu" in theano.config.device:
            srng = theano.sandbox.cuda.rng_curand.CURAND_RandomStreams(seed=seed)
        else:
        '''
        srng = T.shared_randomstreams.RandomStreams(seed=seed)
        epsilon = srng.normal((batch_size, n_in))
        #self.mu = mu
        #self.log_sigma = log_sigma
        #epsilon = np.asarray(rng.normal(size=(batch_size,n_in)), dtype=theano.config.floatX)

        # Reparameterization trick: z = mu + sigma * epsilon
        self.output = mu + T.exp(0.5*log_sigma) * epsilon


class CatSampleLayer(object):
    def __init__(self, pi, n_in, batch_size):
        '''
        This layer implements the categorical sampling step of Stochastic
        Gradient Variational Bayes (SGVB) via the Gumbel-max trick.

        :type pi: theano.tensor.dmatrix
        :param pi: a symbolic tensor of shape (n_batch, n_in), the probability
                   of each category
        '''
        seed = 42
        srng = T.shared_randomstreams.RandomStreams(seed=seed)

        # Generate standard Gumbel noise from a uniform distribution:
        # g = -log(-log(u)), u ~ Uniform(0, 1); c is a small smoothing constant
        c = 0.01
        uniform = srng.uniform((batch_size, n_in))
        epsilon = -T.log(-T.log(uniform + c) + c)
        gamma = T.log(pi + c) + epsilon
        # One-hot encode the row-wise argmax of the perturbed log-probabilities
        self.output = T.eq(gamma, T.max(gamma, axis=1, keepdims=True))

################################################################################################################
################################################################################################################

'''
Data/Parameter Structure Definition
Here NN parameters refer to weights and biases. NN structure refers to the
hidden dimensions and the corresponding activation functions.
'''
class NN_struct:
    def __init__(self):
        self.layer_dim = []
        self.activation = []

'''
Neural Network Block Definition
A complete neural network block with a fixed number of hidden layers.
'''
class NN_Block_0L:
    def __init__(self, rng, input_source, input_target, struct, name=''):
        if len(struct.layer_dim) != 2:
            print('used wrong NN Block size')

        #Output Layer
        self.OL = HiddenLayer(
            rng=rng,
            input_source=input_source,
            input_target=input_target,
            n_in=struct.layer_dim[0],
            n_out=struct.layer_dim[1],
            activation=struct.activation[0],
            name=name+'_OL'
        )

        self.output_source = self.OL.output_source
        self.output_target = self.OL.output_target

        self.params = self.OL.params


class NN_Block_1L:
    def __init__(self, rng, input_source, input_target, struct, name=''):
        if len(struct.layer_dim) != 3:
            print('used wrong NN Block size')

        #Hidden Layer
        self.HL_1 = HiddenLayer(
            rng=rng,
            input_source=input_source,
            input_target=input_target,
            n_in=struct.layer_dim[0],
            n_out=struct.layer_dim[1],
            activation=struct.activation[0],
            name=name+'_L1'
        )

        #Output Layer
        self.OL = HiddenLayer(
            rng=rng,
            input_source=self.HL_1.output_source,
            input_target=self.HL_1.output_target,
            n_in=struct.layer_dim[1],
            n_out=struct.layer_dim[2],
            activation=struct.activation[1],
            name=name+'_OL'
        )

        self.output_source = self.OL.output_source
        self.output_target = self.OL.output_target

        self.params = self.HL_1.params + self.OL.params
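For reference, the two sampling tricks used by GaussianSampleLayer and CatSampleLayer are easy to verify outside Theano. Below is a minimal NumPy sketch of the same two steps; the shapes, probabilities, and seed are illustrative assumptions, not values from the model above.

import numpy as np

rng = np.random.default_rng(42)  # illustrative seed
batch_size, n_in = 4, 3

# Reparameterization trick: z = mu + exp(0.5 * log_sigma) * eps, eps ~ N(0, I)
mu = np.zeros((batch_size, n_in))
log_sigma = np.zeros((batch_size, n_in))
eps = rng.standard_normal((batch_size, n_in))
z = mu + np.exp(0.5 * log_sigma) * eps          # differentiable in mu and log_sigma

# Gumbel-max trick: the one-hot argmax of log(pi) + Gumbel noise is a sample from pi
pi = np.array([[0.7, 0.2, 0.1]] * batch_size)
u = rng.uniform(size=(batch_size, n_in))
gumbel = -np.log(-np.log(u))
gamma = np.log(pi) + gumbel
one_hot = (gamma == gamma.max(axis=1, keepdims=True)).astype(float)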
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                               B O T K I T

This is a sample Slack bot built with Botkit.

This bot demonstrates many of the core features of Botkit:

* Connect to Slack using the real time API
* Receive messages based on "spoken" patterns
* Send a message with attachments
* Send a message via direct message (instead of in a public channel)

# RUN THE BOT:

  Get a Bot token from Slack:

    -> http://my.slack.com/services/new/bot

  Run your bot from the command line:

    token=<MY TOKEN> node demo_bot.js

# USE THE BOT:

  Find your bot inside Slack to send it a direct message.

  Say: "Hello"

  The bot will reply "Hello!"

  Say: "Attach"

  The bot will send a message with a multi-field attachment.

  Send: "dm me"

  The bot will reply with a direct message.

  Make sure to invite your bot into other channels using /invite @<my bot>!

# EXTEND THE BOT:

  Botkit has many features for building cool and useful bots!

  Read all about it here:

    -> http://howdy.ai/botkit

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/

var Botkit = require('../lib/Botkit.js');

if (!process.env.token) {
    console.log('Error: Specify token in environment');
    process.exit(1);
}

var controller = Botkit.slackbot({
    debug: false
});

controller.spawn({
    token: process.env.token
}).startRTM(function(err) {
    if (err) {
        throw new Error(err);
    }
});

controller.hears(['hello', 'hi'], ['direct_message', 'direct_mention', 'mention'], function(bot, message) {
    bot.reply(message, "Hello.");
});

controller.hears(['attach'], ['direct_message', 'direct_mention'], function(bot, message) {
    var attachments = [];
    var attachment = {
        title: 'This is an attachment',
        color: '#FFCC99',
        fields: [],
    };

    attachment.fields.push({
        label: 'Field',
        value: 'A longish value',
        short: false,
    });

    attachment.fields.push({
        label: 'Field',
        value: 'Value',
        short: true,
    });

    attachment.fields.push({
        label: 'Field',
        value: 'Value',
        short: true,
    });

    attachments.push(attachment);

    bot.reply(message, {
        text: 'See below...',
        attachments: attachments,
    }, function(err, resp) {
        console.log(err, resp);
    });
});

controller.hears(['dm me'], ['direct_message', 'direct_mention'], function(bot, message) {
    bot.startConversation(message, function(err, convo) {
        convo.say('Heard ya');
    });

    bot.startPrivateConversation(message, function(err, dm) {
        dm.say('Private reply!');
    });
});
""" ASGI config for crm project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crm.settings') application = get_asgi_application()
from pkg_resources import resource_string
import questionary
from cid import utils
from cid.helpers import Athena, CUR, Glue, QuickSight
from cid.helpers.account_map import AccountMap
from cid.plugin import Plugin
import os
import sys
import click
from string import Template
import json
from pathlib import Path
from botocore.exceptions import NoCredentialsError, CredentialRetrievalError
from deepmerge import always_merger

import logging

logger = logging.getLogger(__name__)


class Cid:
    defaults = {
        'quicksight_url': 'https://{region}.quicksight.aws.amazon.com/sn/dashboards/{dashboard_id}'
    }

    def __init__(self, **kwargs) -> None:
        self.__setupLogging(verbosity=kwargs.pop('verbose'))
        logger.info('Initializing CID')
        # Defined resources
        self.resources = dict()
        self.dashboards = dict()
        self.plugins = self.__loadPlugins()
        self._clients = dict()
        self.awsIdentity = None
        self.session = None
        self.qs_url = kwargs.get('quicksight_url', self.defaults.get('quicksight_url'))

    @property
    def qs(self) -> QuickSight:
        if not self._clients.get('quicksight'):
            self._clients.update({
                'quicksight': QuickSight(self.session, self.awsIdentity, resources=self.resources)
            })
        return self._clients.get('quicksight')

    @property
    def athena(self) -> Athena:
        if not self._clients.get('athena'):
            self._clients.update({
                'athena': Athena(self.session, resources=self.resources)
            })
        return self._clients.get('athena')

    @property
    def glue(self) -> Glue:
        if not self._clients.get('glue'):
            self._clients.update({
                'glue': Glue(self.session)
            })
        return self._clients.get('glue')

    @property
    def cur(self) -> CUR:
        if not self._clients.get('cur'):
            _cur = CUR(self.session)
            _cur.athena = self.athena
            print('Checking if CUR is enabled and available...')
            if not _cur.configured:
                print("Error: please ensure CUR is enabled, if yes allow it some time to propagate")
                exit(1)
            print(f'\tAthena table: {_cur.tableName}')
            print(f"\tResource IDs: {'yes' if _cur.hasResourceIDs else 'no'}")
            if not _cur.hasResourceIDs:
                print("Error: CUR has to be created with Resource IDs")
                exit(1)
            print(f"\tSavingsPlans: {'yes' if _cur.hasSavingsPlans else 'no'}")
            print(f"\tReserved Instances: {'yes' if _cur.hasReservations else 'no'}")
            print('done')
            self._clients.update({'cur': _cur})
        return self._clients.get('cur')

    @property
    def accountMap(self) -> AccountMap:
        if not self._clients.get('accountMap'):
            _account_map = AccountMap(self.session)
            _account_map.athena = self.athena
            _account_map.cur = self.cur
            self._clients.update({'accountMap': _account_map})
        return self._clients.get('accountMap')

    def __loadPlugins(self) -> dict:
        if sys.version_info < (3, 8):
            from importlib_metadata import entry_points
        else:
            from importlib.metadata import entry_points
        plugins = dict()
        _entry_points = entry_points().get('cid.plugins')
        print('Loading plugins...')
        logger.info(f'Located {len(_entry_points)} plugin(s)')
        for ep in _entry_points:
            logger.info(f'Loading plugin: {ep.name} ({ep.value})')
            plugin = Plugin(ep.value)
            print(f"\t{ep.name} loaded")
            plugins.update({ep.value: plugin})
            try:
                self.resources = always_merger.merge(self.resources, plugin.provides())
            except AttributeError:
                pass
        print('done\n')
        logger.info('Finished loading plugins')
        return plugins

    def getPlugin(self, plugin) -> dict:
        return self.plugins.get(plugin)

    def run(self, **kwargs):
        print('Checking AWS environment...')
        try:
            self.session = utils.get_boto_session(**kwargs)
            if self.session.profile_name:
                print(f'\tprofile name: {self.session.profile_name}')
                logger.info(f'AWS profile name: {self.session.profile_name}')
            sts = self.session.client('sts')
            self.awsIdentity = sts.get_caller_identity()
            self.qs_url_params = {
                'account_id': self.awsIdentity.get('Account'),
                'region': self.session.region_name
            }
        except (NoCredentialsError, CredentialRetrievalError):
            print('Error: Not authenticated, please check AWS credentials')
            logger.info('Not authenticated, exiting')
            exit()
        print('\taccountId: {}\n\tAWS userId: {}'.format(
            self.awsIdentity.get('Account'),
            self.awsIdentity.get('Arn').split(':')[5]
        ))
        logger.info(f'AWS accountId: {self.awsIdentity.get("Account")}')
        logger.info(f'AWS userId: {self.awsIdentity.get("Arn").split(":")[5]}')
        print('\tRegion: {}'.format(self.session.region_name))
        logger.info(f'AWS region: {self.session.region_name}')
        print('done\n')

    def __setupLogging(self, verbosity: int = 0, log_filename: str = 'cid.log') -> None:
        _logger = logging.getLogger('cid')
        # create formatter
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s:%(funcName)s:%(lineno)d - %(message)s')
        # File handler logs everything down to DEBUG level
        fh = logging.FileHandler(log_filename)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        # Console handler logs everything down to ERROR level
        ch = logging.StreamHandler()
        ch.setLevel(logging.ERROR)
        ch.setFormatter(formatter)
        # add the handlers to logger
        _logger.addHandler(ch)
        _logger.addHandler(fh)
        if verbosity:
            # Limit logging level to DEBUG, base level is WARNING
            verbosity = 2 if verbosity > 2 else verbosity
            _logger.setLevel(logger.getEffectiveLevel() - 10 * verbosity)
            # Logging application start here due to logging configuration
            print(f'Logging level set to: {logging.getLevelName(logger.getEffectiveLevel())}')

    def deploy(self, **kwargs):
        """ Deploy Dashboard """
        selection = list()
        for k, dashboard in self.resources.get('dashboards').items():
            selection.append(
                questionary.Choice(title=f"{dashboard.get('name')}", value=k)
            )
        try:
            selected_dashboard = questionary.select(
                "Please select dashboard to install",
                choices=selection
            ).ask()
        except Exception:
            print('\nEnd: No updates available or dashboard(s) is/are broken\n')
            return

        # Get selected dashboard definition
        dashboard_definition = self.resources.get('dashboards').get(selected_dashboard)
        required_datasets = dashboard_definition.get('dependsOn', dict()).get('datasets', list())
        self.create_datasets(required_datasets)

        # Prepare API parameters
        if not dashboard_definition.get('datasets'):
            dashboard_definition.update({'datasets': {}})
        dashboard_datasets = dashboard_definition.get('datasets')
        for dataset_name in required_datasets:
            arn = next((v.get('Arn') for v in self.qs._datasets.values()
                        if v.get('Name') == dataset_name), None)
            if arn:
                dashboard_datasets.update({dataset_name: arn})

        kwargs = dict()
        local_overrides = f'work/{self.awsIdentity.get("Account")}/{dashboard_definition.get("dashboardId")}.json'
        print(f'Looking for local overrides file "{local_overrides}"...', end='')
        try:
            with open(local_overrides, 'r', encoding='utf-8') as r:
                try:
                    print('found')
                    if click.confirm(f'Use local overrides from {local_overrides}?'):
                        kwargs = json.load(r)
                        print('loaded')
                except Exception as e:
                    # Catch exception and dump a reason
                    click.echo('failed to load, dumping error message')
                    print(json.dumps(e, indent=4, sort_keys=True, default=str))
        except FileNotFoundError:
            print('not found')

        # Get QuickSight template details
        latest_template = self.qs.describe_template(
            template_id=dashboard_definition.get('templateId'),
            account_id=dashboard_definition.get('sourceAccountId')
        )
        dashboard_definition.update({'sourceTemplate': latest_template})

        # Create dashboard
        click.echo(f"Latest template: {latest_template.get('Arn')}/version/{latest_template.get('Version').get('VersionNumber')}")
        click.echo('\nDeploying...', nl=False)
        _url = self.qs_url.format(dashboard_id=dashboard_definition.get('dashboardId'), **self.qs_url_params)
        try:
            self.qs.create_dashboard(dashboard_definition, **kwargs)
            click.echo('completed')
            click.echo(f"#######\n####### Congratulations!\n####### {dashboard_definition.get('name')} is available at: {_url}\n#######")
        except self.qs.client.exceptions.ResourceExistsException:
            click.echo('error, already exists')
            click.echo(f"#######\n####### {dashboard_definition.get('name')} is available at: {_url}\n#######")
        except Exception as e:
            # Catch exception and dump a reason
            click.echo('failed, dumping error message')
            print(json.dumps(e, indent=4, sort_keys=True, default=str))
            exit(1)
        return dashboard_definition.get('dashboardId')

    def open(self, dashboard_id):
        """Open QuickSight dashboard in browser"""
        if os.environ.get('AWS_EXECUTION_ENV') in ['CloudShell', 'AWS_Lambda']:
            print(f"Operation is not supported in {os.environ.get('AWS_EXECUTION_ENV')}")
            return dashboard_id
        if not dashboard_id:
            dashboard_id = self.qs.select_dashboard(force=True)
            dashboard = self.qs.dashboards.get(dashboard_id)
        else:
            # Describe dashboard by the ID given, no discovery
            dashboard = self.qs.describe_dashboard(DashboardId=dashboard_id)
        click.echo('Getting dashboard status...', nl=False)
        if dashboard is not None:
            if dashboard.version.get('Status') not in ['CREATION_SUCCESSFUL']:
                print(json.dumps(dashboard.version.get('Errors'), indent=4, sort_keys=True, default=str))
                click.echo('\nDashboard is unhealthy, please check errors above.')
            click.echo('healthy, opening...')
            click.launch(self.qs_url.format(dashboard_id=dashboard_id, **self.qs_url_params))
        else:
            click.echo('not deployed.')
        return dashboard_id

    def status(self, dashboard_id, **kwargs):
        """Check QuickSight dashboard status"""
        if not dashboard_id:
            dashboard_id = self.qs.select_dashboard(force=True)
            if not dashboard_id:
                click.echo('No deployed dashboard found')
                return
            dashboard = self.qs.dashboards.get(dashboard_id)
        else:
            # Describe dashboard by the ID given, no discovery
            self.qs.discover_dashboard(dashboardId=dashboard_id)
            dashboard = self.qs.describe_dashboard(DashboardId=dashboard_id)
        if dashboard is not None:
            dashboard.display_status()
            dashboard.display_url(self.qs_url, **self.qs_url_params)
        else:
            click.echo('not deployed.')

    def delete(self, dashboard_id, **kwargs):
        """Delete QuickSight dashboard"""
        if not dashboard_id:
            dashboard_id = self.qs.select_dashboard(force=True)
            if not dashboard_id:
                click.echo('No selection, exiting.')
                exit()
        try:
            # Execute query
            click.echo('Deleting dashboard...', nl=False)
            self.qs.delete_dashboard(dashboard_id=dashboard_id)
            print('deleted')
            return dashboard_id
        except self.qs.client.exceptions.ResourceNotFoundException:
            print('not found')
        except Exception as e:
            # Catch exception and dump a reason
            click.echo('failed, dumping error message')
            print(json.dumps(e, indent=4, sort_keys=True, default=str))

    def update(self, dashboard_id, **kwargs):
        """Update Dashboard"""
        if not dashboard_id:
            dashboard_id = self.qs.select_dashboard(force=kwargs.get('force'))
            if not dashboard_id:
                exit()
        dashboard = self.qs.dashboards.get(dashboard_id)
        if not dashboard:
            click.echo(f'Dashboard "{dashboard_id}" is not deployed')
            return

        print('\nChecking for updates...')
        click.echo(f'Deployed template: {dashboard.deployed_arn}')
        click.echo(f"Latest template: {dashboard.sourceTemplate.get('Arn')}/version/{dashboard.latest_version}")
        if dashboard.status == 'legacy':
            click.confirm("\nDashboard template changed, update it anyway?", abort=True)
        elif dashboard.latest:
            click.confirm("\nNo updates available, should I update it anyway?", abort=True)

        kwargs = dict()
        local_overrides = f'work/{self.awsIdentity.get("Account")}/{dashboard.id}.json'
        print(f'Looking for local overrides file "{local_overrides}"...', end='')
        try:
            with open(local_overrides, 'r', encoding='utf-8') as r:
                try:
                    print('found')
                    if click.confirm(f'Use local overrides from {local_overrides}?'):
                        kwargs = json.load(r)
                        print('loaded')
                except Exception as e:
                    # Catch exception and dump a reason
                    click.echo('failed to load, dumping error message')
                    print(json.dumps(e, indent=4, sort_keys=True, default=str))
        except FileNotFoundError:
            print('not found')

        # Update dashboard
        click.echo('\nUpdating...', nl=False)
        try:
            self.qs.update_dashboard(dashboard, **kwargs)
            click.echo('completed')
            dashboard.display_url(self.qs_url, launch=True, **self.qs_url_params)
        except Exception as e:
            # Catch exception and dump a reason
            click.echo('failed, dumping error message')
            print(json.dumps(e, indent=4, sort_keys=True, default=str))

        return dashboard_id

    def create_datasets(self, _datasets: list) -> dict:
        # Check dependencies
        required_datasets = sorted(_datasets)
        print('\nRequired datasets: \n - {}'.format('\n - '.join(required_datasets)))
        try:
            print('\nDetecting existing datasets...', end='')
            for dataset in self.qs.list_data_sets():
                try:
                    self.qs.describe_dataset(dataset.get('DataSetId'))
                except Exception:
                    continue
        except self.qs.client.exceptions.AccessDeniedException:
            print('no permissions, performing full discovery...', end='')
            self.qs.dashboards
            for dataset in required_datasets:
                dataset_definition = self.resources.get('datasets').get(dataset)
        finally:
            print('complete')

        found_datasets = sorted(
            set(required_datasets).intersection([v.get('Name') for v in self.qs._datasets.values()]))
        missing_datasets = sorted(list(set(required_datasets).difference(found_datasets)))

        # If we miss required datasets look in saved deployments
        if len(missing_datasets):
            # TODO: remove below 2 lines ?
            if len(found_datasets):
                print('\nFound: \n - {}'.format('\n - '.join(found_datasets)))
            print('\nMissing: \n - {}'.format('\n - '.join(missing_datasets)))
            # Look for previously saved deployment info
            print('\nLooking in saved deployments...', end='')
            saved_datasets = self.find_saved_datasets(missing_datasets)
            print('{}'.format('nothing found' if not len(saved_datasets) else 'complete'))
            for k, v in saved_datasets.items():
                print(f'\tfound: {k}', end='')
                if len(v.keys()) > 1:
                    # Multiple datasets
                    selected = questionary.select(
                        f'Multiple "{k}" datasets detected, please select one',
                        choices=v.keys()
                    ).ask()
                    self.qs._datasets.update({k: v.get(selected)})
                    missing_datasets.remove(k)
                elif len(v.keys()):
                    # Single dataset
                    print(', using')
                    self.qs._datasets.update({k: next(iter(v.values()))})
                    missing_datasets.remove(k)

        # Look by DataSetId from dataset_template file
        if len(missing_datasets):
            # Look for previously saved deployment info
            print('\nLooking by DataSetId defined in template...', end='')
            for dataset_name in missing_datasets[:]:
                try:
                    dataset_definition = self.resources.get('datasets').get(dataset_name)
                    dataset_file = dataset_definition.get('File')
                    # Load TPL file
                    if dataset_file:
                        raw_template = json.loads(resource_string(
                            dataset_definition.get('providedBy'),
                            f'data/datasets/{dataset_file}'
                        ).decode('utf-8'))
                        ds = self.qs.describe_dataset(raw_template.get('DataSetId'))
                        if ds.get('Name') == dataset_name:
                            missing_datasets.remove(dataset_name)
                            print(f"\n\tFound {dataset_name} as {raw_template.get('DataSetId')}")
                except FileNotFoundError:
                    logger.info(f'File "{dataset_file}" not found')
                except self.qs.client.exceptions.ResourceNotFoundException:
                    logger.info(f'Dataset "{dataset_name}" not found')
                except self.qs.client.exceptions.AccessDeniedException:
                    logger.info(f'Access denied trying to find dataset "{dataset_name}"')
                except Exception:
                    raise
            print('complete')

        # If there are still datasets missing, try automatic creation
        if len(missing_datasets):
            missing_str = ', '.join(missing_datasets)
            print(f'\nThere are still {len(missing_datasets)} datasets missing: {missing_str}')
            for dataset_name in missing_datasets[:]:
                print(f'Creating dataset: {dataset_name}...', end='')
                try:
                    dataset_definition = self.resources.get('datasets').get(dataset_name)
                except Exception:
                    logger.critical('dashboard definition is broken, unable to proceed.')
                    logger.critical(f'dataset definition not found: {dataset_name}')
                    raise
                try:
                    if self.create_dataset(dataset_definition):
                        missing_datasets.remove(dataset_name)
                        print('created')
                    else:
                        print('failed')
                except self.qs.client.exceptions.AccessDeniedException as AccessDeniedException:
                    print('unable to create, missing permissions: {}'.format(AccessDeniedException))

        # Last chance for the user to enter DataSetIds manually
        if len(missing_datasets):
            missing_str = '\n - '.join(missing_datasets)
            print(f'\nThere are still {len(missing_datasets)} datasets missing: \n - {missing_str}')
            print("\nCan't move forward without the full list, please manually create datasets and provide DataSetIds")
            # Loop over the list until it is empty
            while len(missing_datasets):
                # Make a copy and then get an item from the list
                dataset_name = missing_datasets.copy().pop()
                _id = click.prompt(f'\tDataSetId/Arn for {dataset_name}', type=str)
                id = _id.split('/')[-1]
                try:
                    _dataset = self.qs.describe_dataset(id)
                    if _dataset.get('Name') != dataset_name:
                        print(f"\tFound dataset with a different name: {_dataset.get('Name')}, please provide another one")
                        continue
                    self.qs._datasets.update({dataset_name: _dataset})
                    missing_datasets.remove(dataset_name)
                    print('\tFound, using it')
                except Exception:
                    print(f"\tProvided DataSetId '{id}' can't be found\n")
                    continue

    def create_dataset(self, dataset_definition) -> bool:
        # Check for required views
        _views = dataset_definition.get('dependsOn').get('views')
        required_views = [self.cur.tableName if name == '${cur_table_name}' else name for name in _views]

        self.athena.discover_views(required_views)
        found_views = sorted(set(required_views).intersection(self.athena._metadata.keys()))
        missing_views = sorted(list(set(required_views).difference(found_views)))

        # try discovering missing views
        self.athena.discover_views(missing_views)
        # repeat comparison
        found_views = sorted(set(required_views).intersection(self.athena._metadata.keys()))
        missing_views = sorted(list(set(required_views).difference(found_views)))

        # create missing views
        if len(missing_views):
            print(f'\tmissing Athena views: {missing_views}')
            self.create_views(missing_views)

        # Read dataset definition from template
        dataset_file = dataset_definition.get('File')
        if dataset_file:
            if not len(self.qs.athena_datasources):
                logger.info('No Athena datasources found, attempting to create one')
                self.qs.create_data_source()
            if not len(self.qs.athena_datasources):
                logger.info('No Athena datasources available, failing')
                return False
            # Load TPL file
            columns_tpl = dict()
            columns_tpl.update({
                'cur_table_name': self.cur.tableName if dataset_definition.get('dependsOn').get('cur') else None,
                'athena_datasource_arn': next(iter(self.qs.athena_datasources)),
                'athena_database_name': self.athena.DatabaseName,
                'user_arn': self.qs.user.get('Arn')
            })
            template = Template(resource_string(
                dataset_definition.get('providedBy'),
                f'data/datasets/{dataset_file}'
            ).decode('utf-8'))
            compiled_dataset = json.loads(template.safe_substitute(columns_tpl))
            self.qs.create_dataset(compiled_dataset)
        else:
            print(f"Error: {dataset_definition.get('Name')} definition is broken")
            exit(1)
        return True

    def find_saved_datasets(self, datasets: list) -> dict:
        """Look for datasets in saved deployments"""
        # Get all saved deployments found
        saved_deployments = self.find_saved_deployments()
        found_datasets = dict()
        for deployment in saved_deployments:
            try:
                # extract dataset references from saved deployment
                _datasets = deployment.get('SourceEntity').get('SourceTemplate').get('DataSetReferences', list())
                for dataset in _datasets:
                    # we're interested only in datasets from the list
                    if dataset.get('DataSetPlaceholder') in datasets:
                        # check if the dataset exists by describing it
                        try:
                            _dataset = self.qs.describe_dataset(dataset.get('DataSetArn').split('/')[1])
                        except Exception:
                            continue
                        # Create a list of found datasets per dataset name
                        if not found_datasets.get(_dataset.get('Name')):
                            found_datasets.update({_dataset.get('Name'): dict()})
                        # Add datasets using Arn as a key
                        if not found_datasets.get(_dataset.get('Name')).get(_dataset.get('Arn')):
                            found_datasets.get(_dataset.get('Name')).update({_dataset.get('Arn'): _dataset})
            except AttributeError:
                # move to next saved deployment if the key is not present
                continue
        return found_datasets

    def find_saved_deployments(self) -> list:
        """Look for saved deployment information"""
        # Set base paths
        abs_path = Path().absolute()
        # Find all saved deployments for current AWS account
        file_path = os.path.join(abs_path, f'work/{self.awsIdentity.get("Account")}')
        found_deployments = list()
        if os.path.isdir(file_path):
            files = [f for f in os.listdir(file_path) if f.endswith('.json')]
            for file in files:
                with open(os.path.join(file_path, file)) as f:
                    found_deployments.append(json.loads(f.read()))
        return found_deployments

    def create_views(self, views: list) -> None:
        for view in views:
            self.create_view(view)

    def create_view(self, view_name: str) -> None:
        # For account mappings create a view using a special helper
        if view_name in ['account_map', 'aws_accounts']:
            self.accountMap.create(view_name)
            return
        # Create a view
        print(f'\nCreating view: {view_name}')
        logger.info(f'Creating view: {view_name}')
        logger.info('Getting view definition')
        view_definition = self.resources.get('views').get(view_name, dict())
        logger.debug(f'View definition: {view_definition}')

        # Discover dependency views (may not be discovered earlier)
        dependency_views = view_definition.get('dependsOn', dict()).get('views', list())
        logger.info(f"Dependency views: {', '.join(dependency_views)}" if dependency_views else 'No dependency views')
        self.athena.discover_views(dependency_views)
        while dependency_views:
            dep = dependency_views.copy().pop()
            if dep not in self.athena._metadata.keys():
                print(f'Missing dependency view: {dep}, trying to create')
                logger.info(f'Missing dependency view: {dep}, trying to create')
                self.create_view(dep)
            dependency_views.remove(dep)
        view_query = self.get_view_query(view_name=view_name)
        if view_definition.get('type') == 'Glue_Table':
            try:
                self.glue.create_table(json.loads(view_query))
            except self.glue.client.exceptions.AlreadyExistsException:
                print(f'\nError: Glue table "{view_name}" exists but not found, please check your configuration, exiting')
                exit(1)
        else:
            self.athena.execute_query(view_query)

    def get_view_query(self, view_name: str) -> str:
        """ Returns a fully compiled AHQ """
        # View path
        view_definition = self.resources.get('views').get(view_name, dict())
        cur_required = view_definition.get('dependsOn', dict()).get('cur')
        if cur_required and self.cur.hasSavingsPlans and self.cur.hasReservations and view_definition.get('spriFile'):
            view_file = view_definition.get('spriFile')
        elif cur_required and self.cur.hasSavingsPlans and view_definition.get('spFile'):
            view_file = view_definition.get('spFile')
        elif cur_required and self.cur.hasReservations and view_definition.get('riFile'):
            view_file = view_definition.get('riFile')
        elif view_definition.get('File'):
            view_file = view_definition.get('File')
        else:
            logger.critical(f'\n"{view_name}" view information is incorrect, skipping')

        # Load TPL file
        template = Template(resource_string(
            view_definition.get('providedBy'),
            f'data/queries/{view_file}'
        ).decode('utf-8'))

        # Prepare template parameters
        columns_tpl = dict()
        columns_tpl.update({
            'cur_table_name': self.cur.tableName if cur_required else None,
            'athena_database_name': self.athena.DatabaseName if view_definition.get('parameters', dict()).get('athenaDatabaseName') else None
        })
        for k, v in view_definition.get('parameters', dict()).items():
            if k == 'athenaDatabaseName':
                param = {'athena_database_name': self.athena.DatabaseName}
            elif v.get('value'):
                param = {k: v.get('value')}
            else:
                value = None
                while not value:
                    value = click.prompt(f"Required parameter: {k} ({v.get('description')})",
                                         default=v.get('value'), show_default=True)
                param = {k: value}
            # Add parameter
            columns_tpl.update(param)
        # Compile template
        compiled_query = template.safe_substitute(columns_tpl)

        return compiled_query

    def map(self):
        """Create account mapping Athena views"""
        for v in ['account_map', 'aws_accounts']:
            self.accountMap.create(v)
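The dataset and view compilation above leans on string.Template.safe_substitute, which leaves unknown ${placeholders} untouched instead of raising. A minimal sketch of that behavior (the placeholder names below mirror, but are not taken from, the real query templates):

from string import Template

# ${cur_table_name} gets substituted; ${unknown_param} is left as-is
tpl = Template('SELECT * FROM ${cur_table_name} WHERE x = ${unknown_param}')
print(tpl.safe_substitute({'cur_table_name': 'cur'}))
# -> SELECT * FROM cur WHERE x = ${unknown_param}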
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

import expect from '@kbn/expect';
import lzString from 'lz-string';
import { historyProvider } from '../history_provider';

function createState() {
  return {
    transient: {
      selectedPage: 'page-f3ce-4bb7-86c8-0417606d6592',
      selectedToplevelNodes: ['element-d88c-4bbd-9453-db22e949b92e'],
      resolvedArgs: {},
    },
    persistent: {
      schemaVersion: 0,
      time: new Date().getTime(),
    },
  };
}

describe.skip('historyProvider', () => {
  let history;
  let state;

  beforeEach(() => {
    history = historyProvider();
    state = createState();
  });

  describe('instances', () => {
    it('should return the same instance for the same window object', () => {
      expect(historyProvider()).to.equal(history);
    });

    it('should return different instance for different window object', () => {
      const newWindow = {};
      expect(historyProvider(newWindow)).not.to.be(history);
    });
  });

  describe('push updates', () => {
    beforeEach(() => {
      history.push(state);
    });

    afterEach(() => {
      // reset state back to initial after each test
      history.undo();
    });

    describe('push', () => {
      it('should add state to location', () => {
        expect(history.getLocation().state).to.eql(state);
      });

      it('should push compressed state into history', () => {
        const hist = history.historyInstance;
        expect(hist.location.state).to.equal(lzString.compress(JSON.stringify(state)));
      });
    });

    describe.skip('undo', () => {
      it('should move history back', () => {
        // pushed location has state value
        expect(history.getLocation().state).to.eql(state);

        // back to initial location with null state
        history.undo();
        expect(history.getLocation().state).to.be(null);
      });
    });

    describe.skip('redo', () => {
      it('should move history forward', () => {
        // back to initial location, with null state
        history.undo();
        expect(history.getLocation().state).to.be(null);

        // return to pushed location, with state value
        history.redo();
        expect(history.getLocation().state).to.eql(state);
      });
    });
  });

  describe.skip('replace updates', () => {
    beforeEach(() => {
      history.replace(state);
    });

    afterEach(() => {
      // reset history to default after each test
      history.replace(null);
    });

    describe('replace', () => {
      it('should replace state in window history', () => {
        expect(history.getLocation().state).to.eql(state);
      });

      it('should replace compressed state into history', () => {
        const hist = history.historyInstance;
        expect(hist.location.state).to.equal(lzString.compress(JSON.stringify(state)));
      });
    });
  });

  describe('onChange', () => {
    const createOnceHandler = (history, done, fn) => {
      const teardown = history.onChange((location, prevLocation) => {
        if (typeof fn === 'function') {
          fn(location, prevLocation);
        }
        teardown();
        done();
      });
    };

    it('should return a method to remove the listener', () => {
      const handler = () => 'hello world';
      const teardownFn = history.onChange(handler);
      expect(teardownFn).to.be.a('function');

      // teardown the listener
      teardownFn();
    });

    it('should call handler on state change', done => {
      createOnceHandler(history, done, loc => {
        expect(loc).to.be.a('object');
      });
      history.push({});
    });

    it('should pass location object to handler', done => {
      createOnceHandler(history, done, location => {
        expect(location.pathname).to.be.a('string');
        expect(location.hash).to.be.a('string');
        expect(location.state).to.be.an('object');
        expect(location.action).to.equal('push');
      });
      history.push(state);
    });

    it('should pass decompressed state to handler', done => {
      createOnceHandler(history, done, ({ state: curState }) => {
        expect(curState).to.eql(state);
      });
      history.push(state);
    });

    it('should pass in the previous location object to handler', done => {
      createOnceHandler(history, done, (location, prevLocation) => {
        expect(prevLocation.pathname).to.be.a('string');
        expect(prevLocation.hash).to.be.a('string');
        expect(prevLocation.state).to.be(null);
        expect(prevLocation.action).to.equal('push');
      });
      history.push(state);
    });
  });

  describe('resetOnChange', () => {
    // the history onChange handler was made async and now there's no way to know when the handler was called
    // TODO: restore these tests
    it.skip('removes listeners', () => {
      const createHandler = () => {
        let callCount = 0;

        function handlerFn() {
          callCount += 1;
        }

        handlerFn.getCallCount = () => callCount;
        return handlerFn;
      };

      const handler1 = createHandler();
      const handler2 = createHandler();

      // attach and test the first handler
      history.onChange(handler1);

      expect(handler1.getCallCount()).to.equal(0);
      history.push({});
      expect(handler1.getCallCount()).to.equal(1);

      // attach and test the second handler
      history.onChange(handler2);

      expect(handler2.getCallCount()).to.equal(0);
      history.push({});
      expect(handler1.getCallCount()).to.equal(2);
      expect(handler2.getCallCount()).to.equal(1);

      // remove all handlers
      history.resetOnChange();
      history.push({});
      expect(handler1.getCallCount()).to.equal(2);
      expect(handler2.getCallCount()).to.equal(1);
    });
  });

  describe('parse', () => {
    it('returns the decompressed object', () => {
      history.push(state);

      const hist = history.historyInstance;
      const rawState = hist.location.state;

      expect(rawState).to.be.a('string');
      expect(history.parse(rawState)).to.eql(state);
    });

    it('returns null with invalid JSON', () => {
      expect(history.parse('hello')).to.be(null);
    });
  });

  describe('encode', () => {
    it('returns the compressed string', () => {
      history.push(state);

      const hist = history.historyInstance;
      const rawState = hist.location.state;

      expect(rawState).to.be.a('string');
      expect(history.encode(state)).to.eql(rawState);
    });
  });
});
/********************************************************************************
**** Copyright (C), 2020, xx xx xx xx info&tech Co., Ltd.                   ****
********************************************************************************
 * File Name    : rtos_test.c
 * Date         : 2020-07-20
 * Description  : Used to calculate and print the stack and CPU usage of each task.
 * Version      : 1.0
 * Function List :
 *
 * Record       :
 * 1.Date       : 2020-07-20
 *   Modification: Created file
 *************************************************************************************************************/
#ifndef _RTOS_TEST_H_
#define _RTOS_TEST_H_

#include "stdint.h"

/*****************************************************************************
 * Function     : rtos_test_init
 * Description  : Initialize the task-info calculation and print function
 * Input        : void
 * Output       : None
 * Return       :
 * Others       :
 * Record
 * 1.Date       : 20200720
 *   Modification: Created function
 *****************************************************************************/
void rtos_test_init(void);

void rtos_test_thread(void * arg);

#endif
#!/usr/bin/env python3
'''
PortScan v3
-----------

This application scans for open ports on the designated system.
It uses multiprocessing to speed up this process.
'''

import socket
import subprocess
import sys
from datetime import datetime
from multiprocessing import Pool


def scan(port):
    # NOTE: worker processes inherit target_ip from the parent via fork;
    # on platforms that use 'spawn' it would need to be passed explicitly.
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex((target_ip, port))
        if result == 0:
            print("Port {}:\tOpen".format(port))
        sock.close()
    except socket.gaierror:
        print('Hostname could not be resolved.')
        sys.exit(0)
    except socket.error:
        print("Couldn't connect to server.")
        sys.exit(0)
    except Exception:
        return


if __name__ == '__main__':
    ports = list(range(1, 4096))
    target = ''

    try:
        target = sys.argv[1]
    except IndexError:
        print("\nUsage:\t{} [target]\n\n\tScan for open ports on target machine.\n".format(sys.argv[0]))
        sys.exit(0)

    # Clear the screen
    subprocess.call('clear', shell=True)

    target_ip = socket.gethostbyname(target)

    # Print a nice banner with information on which host we are about to scan
    print("-" * 60)
    print("Please wait, scanning remote host", target_ip)
    print("-" * 60)

    # Check what time the scan started
    t1 = datetime.now()

    with Pool(processes=8) as p:
        p.map(scan, ports)

    # Check the time again
    t2 = datetime.now()

    # Calculate the elapsed time to see how long the scan took
    total = t2 - t1

    # Print the information to screen
    print('Scanning Completed in: ', total)
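Filtered ports that silently drop packets can make each connect_ex call wait for the OS-level connect timeout. A hedged variant of scan() with an explicit per-socket timeout (the one-second default is an arbitrary assumption, and the function name is introduced here for illustration):

import socket

def scan_with_timeout(target_ip, port, timeout=1.0):
    """Like scan(), but bounds each connection attempt to `timeout` seconds."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)  # fail fast on filtered/dropped ports
        try:
            if sock.connect_ex((target_ip, port)) == 0:
                print("Port {}:\tOpen".format(port))
        except socket.timeout:
            pass  # treat a timed-out connect as closed/filtered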
var models = require('../models');
var Tag = models.Tag;

exports.getByName = function (tagName, callback) {
    Tag.findOne({tagName: tagName}, callback);
};

exports.insert = function (tag, callback) {
    var newTag = new Tag(tag);
    newTag.save(callback);
};

exports.save = function (tag, callback) {
    tag.save(callback);
};

exports.update = function (_id, update, callback) {
    Tag.update({_id: _id}, {$set: update}, callback);
};

exports.getAllTags = function (callback) {
    Tag.find({}, callback);
};

exports.getById = function (_id, callback) {
    Tag.findById(_id, callback);
};

exports.getByPage = function (query, fields, skip, limit, sort, callback) {
    Tag.find(query, fields, {skip: skip, limit: limit, sort: sort}, callback);
};
import React, {Component} from 'react'
import axios from 'axios'
import {connect} from 'react-redux'

class OrderHistory extends Component {
  constructor() {
    super()
    this.state = {
      orderHistory: [],
      error: false
    }
  }

  async componentDidMount() {
    try {
      const orderData = await axios.get('/api/order/' + this.props.user.id)
      this.setState({orderHistory: orderData.data})
    } catch (error) {
      this.setState({error: true})
    }
  }

  render() {
    const orderHistory = this.state.orderHistory
    if (this.state.error) {
      return (
        <div>
          Error. Either we goofed, or you accessed orders from
          not-the-homepage. (Aye, it should work either way, but this was last
          minute.)
        </div>
      )
    }
    if (orderHistory.length === 0) {
      return <div>Order history is empty. Buy something.</div>
    }
    return (
      <div>
        {orderHistory.map(order => {
          return (
            // key props added so React can track list items across renders
            <div key={order.userToken}>
              Token: {order.userToken}
              <div>
                Order details:
                {JSON.parse(order.userOrder).map(dinosaur => {
                  return (
                    <div key={dinosaur.name}>
                      Name: {dinosaur.name} | Quantity: {dinosaur.quantity} |
                      Price: {dinosaur.price}
                    </div>
                  )
                })}
              </div>
            </div>
          )
        })}
      </div>
    )
  }
}

export const mapStateToProps = state => {
  return {
    user: state.user,
    isLoggedIn: !!state.user.id
  }
}

export default connect(mapStateToProps)(OrderHistory)
from cleo import Command

from .list import ListCommand
# from .get import GetCommand
# from .set import SetCommand
# from .convert import ConvertCommand


class ConfigCommand(Command):

    name = "config"

    description = "Configuration command"

    commands = [
        ListCommand(),
        # GetCommand(),
        # SetCommand(),
        # ConvertCommand(),
    ]

    def handle(self):
        return self.call("help", self._config.name)
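For context, a command class like this is typically registered on a cleo Application. A minimal sketch, under the assumption of the pre-1.0 cleo API that the import above implies; the application name and version are illustrative values only:

from cleo import Application

# Hypothetical entry point, not part of this package
app = Application('myapp', '0.1.0')
app.add(ConfigCommand())

if __name__ == '__main__':
    app.run()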
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_ANIMATION_INTERPOLABLE_TRANSFORM_LIST_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_ANIMATION_INTERPOLABLE_TRANSFORM_LIST_H_

#include <memory>

#include "third_party/blink/renderer/core/animation/interpolable_value.h"
#include "third_party/blink/renderer/platform/transforms/transform_operations.h"
#include "third_party/blink/renderer/platform/wtf/casting.h"

namespace blink {

class CSSValue;
class StyleResolverState;

// Represents a blink::TransformOperations, converted into a form that can be
// interpolated from/to.
class CORE_EXPORT InterpolableTransformList final : public InterpolableValue {
 public:
  InterpolableTransformList(TransformOperations&& operations)
      : operations_(std::move(operations)) {}

  static std::unique_ptr<InterpolableTransformList> ConvertCSSValue(
      const CSSValue&,
      const StyleResolverState*);

  // Return the underlying TransformOperations. Usually called after
  // composition and interpolation, to apply the results back to the style.
  TransformOperations operations() const { return operations_; }

  void PreConcat(const InterpolableTransformList& underlying);
  void AccumulateOnto(const InterpolableTransformList& underlying);

  // InterpolableValue implementation:
  void Interpolate(const InterpolableValue& to,
                   const double progress,
                   InterpolableValue& result) const final;
  bool IsTransformList() const final { return true; }
  bool Equals(const InterpolableValue& other) const final {
    NOTREACHED();
    return false;
  }
  void Scale(double scale) final { NOTREACHED(); }
  void Add(const InterpolableValue& other) final { NOTREACHED(); }
  void AssertCanInterpolateWith(const InterpolableValue& other) const final;

 private:
  InterpolableTransformList* RawClone() const final {
    return new InterpolableTransformList(TransformOperations(operations_));
  }
  InterpolableTransformList* RawCloneAndZero() const final {
    NOTREACHED();
    return nullptr;
  }

  TransformOperations operations_;
};

template <>
struct DowncastTraits<InterpolableTransformList> {
  static bool AllowFrom(const InterpolableValue& interpolable_value) {
    return interpolable_value.IsTransformList();
  }
};

}  // namespace blink

#endif  // THIRD_PARTY_BLINK_RENDERER_CORE_ANIMATION_INTERPOLABLE_TRANSFORM_LIST_H_
"""CSP (Constraint Satisfaction Problems) problems and solvers. (Chapter 6).""" from utils import argmin_random_tie, count, first import search from collections import defaultdict from functools import reduce import itertools import re import random class CSP(search.Problem): """This class describes finite-domain Constraint Satisfaction Problems. A CSP is specified by the following inputs: variables A list of variables; each is atomic (e.g. int or string). domains A dict of {var:[possible_value, ...]} entries. neighbors A dict of {var:[var,...]} that for each variable lists the other variables that participate in constraints. constraints A function f(A, a, B, b) that returns true if neighbors A, B satisfy the constraint when they have values A=a, B=b In the textbook and in most mathematical definitions, the constraints are specified as explicit pairs of allowable values, but the formulation here is easier to express and more compact for most cases. (For example, the n-Queens problem can be represented in O(n) space using this notation, instead of O(N^4) for the explicit representation.) In terms of describing the CSP as a problem, that's all there is. However, the class also supports data structures and methods that help you solve CSPs by calling a search function on the CSP. Methods and slots are as follows, where the argument 'a' represents an assignment, which is a dict of {var:val} entries: assign(var, val, a) Assign a[var] = val; do other bookkeeping unassign(var, a) Do del a[var], plus other bookkeeping nconflicts(var, val, a) Return the number of other variables that conflict with var=val curr_domains[var] Slot: remaining consistent values for var Used by constraint propagation routines. The following methods are used only by graph_search and tree_search: actions(state) Return a list of actions result(state, action) Return a successor of state goal_test(state) Return true if all constraints satisfied The following are just for debugging purposes: nassigns Slot: tracks the number of assignments made display(a) Print a human-readable representation """ def __init__(self, variables, domains, neighbors, constraints): """Construct a CSP problem. If variables is empty, it becomes domains.keys().""" variables = variables or list(domains.keys()) self.variables = variables self.domains = domains self.neighbors = neighbors self.constraints = constraints self.initial = () self.curr_domains = None self.nassigns = 0 def assign(self, var, val, assignment): """Add {var: val} to assignment; Discard the old value if any.""" assignment[var] = val self.nassigns += 1 def unassign(self, var, assignment): """Remove {var: val} from assignment. 
DO NOT call this if you are changing a variable to a new value; just call assign for that.""" if var in assignment: del assignment[var] def nconflicts(self, var, val, assignment): """Return the number of conflicts var=val has with other variables.""" # Subclasses may implement this more efficiently def conflict(var2): return (var2 in assignment and not self.constraints(var, val, var2, assignment[var2])) return count(conflict(v) for v in self.neighbors[var]) def display(self, assignment): """Show a human-readable representation of the CSP.""" # Subclasses can print in a prettier way, or display with a GUI print('CSP:', self, 'with assignment:', assignment) # These methods are for the tree- and graph-search interface: def actions(self, state): """Return a list of applicable actions: nonconflicting assignments to an unassigned variable.""" if len(state) == len(self.variables): return [] else: assignment = dict(state) var = first([v for v in self.variables if v not in assignment]) return [(var, val) for val in self.domains[var] if self.nconflicts(var, val, assignment) == 0] def result(self, state, action): """Perform an action and return the new state.""" (var, val) = action return state + ((var, val),) def goal_test(self, state): """The goal is to assign all variables, with all constraints satisfied.""" assignment = dict(state) return (len(assignment) == len(self.variables) and all(self.nconflicts(variables, assignment[variables], assignment) == 0 for variables in self.variables)) # These are for constraint propagation def support_pruning(self): """Make sure we can prune values from domains. (We want to pay for this only if we use it.)""" if self.curr_domains is None: self.curr_domains = {v: list(self.domains[v]) for v in self.variables} def suppose(self, var, value): """Start accumulating inferences from assuming var=value.""" self.support_pruning() removals = [(var, a) for a in self.curr_domains[var] if a != value] self.curr_domains[var] = [value] return removals def prune(self, var, value, removals): """Rule out var=value.""" self.curr_domains[var].remove(value) if removals is not None: removals.append((var, value)) def choices(self, var): """Return all values for var that aren't currently ruled out.""" return (self.curr_domains or self.domains)[var] def infer_assignment(self): """Return the partial assignment implied by the current inferences.""" self.support_pruning() return {v: self.curr_domains[v][0] for v in self.variables if 1 == len(self.curr_domains[v])} def restore(self, removals): """Undo a supposition and all inferences from it.""" for B, b in removals: self.curr_domains[B].append(b) # This is for min_conflicts search def conflicted_vars(self, current): """Return a list of variables in current assignment that are in conflict""" return [var for var in self.variables if self.nconflicts(var, current[var], current) > 0] # ______________________________________________________________________________ # Constraint Propagation with AC-3 def AC3(csp, queue=None, removals=None): """[Figure 6.3]""" if queue is None: queue = [(Xi, Xk) for Xi in csp.variables for Xk in csp.neighbors[Xi]] csp.support_pruning() while queue: (Xi, Xj) = queue.pop() if revise(csp, Xi, Xj, removals): if not csp.curr_domains[Xi]: return False for Xk in csp.neighbors[Xi]: if Xk != Xi: queue.append((Xk, Xi)) return True def revise(csp, Xi, Xj, removals): """Return true if we remove a value.""" revised = False for x in csp.curr_domains[Xi][:]: # If Xi=x conflicts with Xj=y for every possible y, eliminate Xi=x if 
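# --- Usage sketch (added for illustration, not part of the original module) ---
# AC3 enforces arc consistency by pruning curr_domains. The tiny two-variable
# CSP below, with an illustrative "X < Y" constraint, is an assumption made up
# purely for this demo.

def _ac3_demo():
    csp = CSP(variables=['X', 'Y'],
              domains={'X': [1, 2, 3], 'Y': [1, 2, 3]},
              neighbors={'X': ['Y'], 'Y': ['X']},
              constraints=lambda A, a, B, b: a < b if A == 'X' else b < a)
    AC3(csp)
    # X=3 has no supporting Y, and Y=1 has no supporting X, so both are pruned:
    assert csp.curr_domains == {'X': [1, 2], 'Y': [2, 3]}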
# ______________________________________________________________________________
# CSP Backtracking Search

# Variable ordering


def first_unassigned_variable(assignment, csp):
    """The default variable order."""
    return first([var for var in csp.variables if var not in assignment])


def mrv(assignment, csp):
    """Minimum-remaining-values heuristic."""
    return argmin_random_tie(
        [v for v in csp.variables if v not in assignment],
        key=lambda var: num_legal_values(csp, var, assignment))


def num_legal_values(csp, var, assignment):
    if csp.curr_domains:
        return len(csp.curr_domains[var])
    else:
        return count(csp.nconflicts(var, val, assignment) == 0
                     for val in csp.domains[var])

# Value ordering


def unordered_domain_values(var, assignment, csp):
    """The default value order."""
    return csp.choices(var)


def lcv(var, assignment, csp):
    """Least-constraining-values heuristic."""
    return sorted(csp.choices(var),
                  key=lambda val: csp.nconflicts(var, val, assignment))

# Inference


def no_inference(csp, var, value, assignment, removals):
    return True


def forward_checking(csp, var, value, assignment, removals):
    """Prune neighbor values inconsistent with var=value."""
    for B in csp.neighbors[var]:
        if B not in assignment:
            for b in csp.curr_domains[B][:]:
                if not csp.constraints(var, value, B, b):
                    csp.prune(B, b, removals)
            if not csp.curr_domains[B]:
                return False
    return True


def mac(csp, var, value, assignment, removals):
    """Maintain arc consistency."""
    return AC3(csp, [(X, var) for X in csp.neighbors[var]], removals)

# The search, proper


def backtracking_search(csp,
                        select_unassigned_variable=first_unassigned_variable,
                        order_domain_values=unordered_domain_values,
                        inference=no_inference):
    """[Figure 6.5]"""

    def backtrack(assignment):
        if len(assignment) == len(csp.variables):
            return assignment
        var = select_unassigned_variable(assignment, csp)
        for value in order_domain_values(var, assignment, csp):
            if 0 == csp.nconflicts(var, value, assignment):
                csp.assign(var, value, assignment)
                removals = csp.suppose(var, value)
                if inference(csp, var, value, assignment, removals):
                    result = backtrack(assignment)
                    if result is not None:
                        return result
                csp.restore(removals)
        csp.unassign(var, assignment)
        return None

    result = backtrack({})
    assert result is None or csp.goal_test(result)
    return result

# ______________________________________________________________________________
# Min-conflicts hillclimbing search for CSPs


def min_conflicts(csp, max_steps=100000):
    """Solve a CSP by stochastic hillclimbing on the number of conflicts."""
    # Generate a complete assignment for all variables (probably with conflicts)
    csp.current = current = {}
    for var in csp.variables:
        val = min_conflicts_value(csp, var, current)
        csp.assign(var, val, current)
    # Now repeatedly choose a random conflicted variable and change it
    for i in range(max_steps):
        conflicted = csp.conflicted_vars(current)
        if not conflicted:
            return current
        var = random.choice(conflicted)
        val = min_conflicts_value(csp, var, current)
        csp.assign(var, val, current)
    return None


def min_conflicts_value(csp, var, current):
    """Return the value that will give var the least number of conflicts.
    If there is a tie, choose at random."""
    return argmin_random_tie(csp.domains[var],
                             key=lambda val: csp.nconflicts(var, val, current))
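# --- Usage sketch (added for illustration, not part of the original module) ---
# backtracking_search and min_conflicts share the CSP interface. The 3-variable
# triangle-coloring instance below is an illustrative assumption.

def _search_demo():
    triangle = CSP(variables=['A', 'B', 'C'],
                   domains={v: ['R', 'G', 'B'] for v in 'ABC'},
                   neighbors={'A': ['B', 'C'], 'B': ['A', 'C'], 'C': ['A', 'B']},
                   constraints=lambda A, a, B, b: a != b)
    result = backtracking_search(triangle, select_unassigned_variable=mrv,
                                 inference=forward_checking)
    assert len(set(result.values())) == 3  # a triangle needs all three colors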
# ______________________________________________________________________________


def tree_csp_solver(csp):
    """[Figure 6.11]"""
    assignment = {}
    root = csp.variables[0]
    X, parent = topological_sort(csp, root)

    for Xj in reversed(X[1:]):
        if not make_arc_consistent(parent[Xj], Xj, csp):
            return None

    for Xi in X:
        if not csp.curr_domains[Xi]:
            return None
        assignment[Xi] = csp.curr_domains[Xi][0]
    return assignment


def topological_sort(X, root):
    """Returns the topological sort of X starting from the root.

    Input:
        X is a CSP; its variables are the nodes of the graph and its
        neighbors dict gives the neighbors of each node
        root denotes the root of the graph.

    Output:
        stack is a list with the nodes topologically sorted
        parents is a dictionary pointing to each node's parent

    Other:
        visited shows the state (visited - not visited) of nodes
    """
    nodes = X.variables
    neighbors = X.neighbors

    visited = defaultdict(lambda: False)

    stack = []
    parents = {}

    build_topological(root, None, neighbors, visited, stack, parents)
    return stack, parents


def build_topological(node, parent, neighbors, visited, stack, parents):
    """Builds the topological sort and the parents of each node in the graph"""
    visited[node] = True

    for n in neighbors[node]:
        if not visited[n]:
            build_topological(n, node, neighbors, visited, stack, parents)

    parents[node] = parent
    stack.insert(0, node)


def make_arc_consistent(Xj, Xk, csp):
    raise NotImplementedError

# ______________________________________________________________________________
# Map-Coloring Problems


class UniversalDict:
    """A universal dict maps any key to the same value. We use it here
    as the domains dict for CSPs in which all variables have the same domain.
    >>> d = UniversalDict(42)
    >>> d['life']
    42
    """

    def __init__(self, value): self.value = value

    def __getitem__(self, key): return self.value

    def __repr__(self): return '{{Any: {0!r}}}'.format(self.value)


def different_values_constraint(A, a, B, b):
    """A constraint saying two neighboring variables must differ in value."""
    return a != b


def MapColoringCSP(colors, neighbors):
    """Make a CSP for the problem of coloring a map with different colors
    for any two adjacent regions. Arguments are a list of colors, and a
    dict of {region: [neighbor,...]} entries. This dict may also be
    specified as a string of the form defined by parse_neighbors."""
    if isinstance(neighbors, str):
        neighbors = parse_neighbors(neighbors)
    return CSP(list(neighbors.keys()), UniversalDict(colors), neighbors,
               different_values_constraint)


def parse_neighbors(neighbors, variables=[]):
    """Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping
    regions to neighbors. The syntax is a region name followed by a ':'
    followed by zero or more region names, followed by ';', repeated for
    each region name. If you say 'X: Y' you don't need 'Y: X'.
    >>> parse_neighbors('X: Y Z; Y: Z') == {'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}
    True
    """
    dic = defaultdict(list)
    specs = [spec.split(':') for spec in neighbors.split(';')]
    for (A, Aneighbors) in specs:
        A = A.strip()
        for B in Aneighbors.split():
            dic[A].append(B)
            dic[B].append(A)
    return dic


australia = MapColoringCSP(list('RGB'),
                           'SA: WA NT Q NSW V; NT: WA Q; NSW: Q V; T: ')

usa = MapColoringCSP(list('RGBY'),
                     """WA: OR ID; OR: ID NV CA; CA: NV AZ; NV: ID UT AZ; ID: MT WY UT;
        UT: WY CO AZ; MT: ND SD WY; WY: SD NE CO; CO: NE KA OK NM; NM: OK TX;
        ND: MN SD; SD: MN IA NE; NE: IA MO KA; KA: MO OK; OK: MO AR TX;
        TX: AR LA; MN: WI IA; IA: WI IL MO; MO: IL KY TN AR; AR: MS TN LA;
        LA: MS; WI: MI IL; IL: IN KY; IN: OH KY; MS: TN AL; AL: TN GA FL;
        MI: OH IN; OH: PA WV KY; KY: WV VA TN; TN: VA NC GA; GA: NC SC FL;
        PA: NY NJ DE MD WV; WV: MD VA; VA: MD DC NC; NC: SC; NY: VT MA CT NJ;
        NJ: DE; DE: MD; MD: DC; VT: NH MA; MA: NH RI CT; CT: RI; ME: NH;
        HI: ; AK: """)

france = MapColoringCSP(list('RGBY'),
                        """AL: LO FC; AQ: MP LI PC; AU: LI CE BO RA LR MP; BO: CE IF CA FC RA
        AU; BR: NB PL; CA: IF PI LO FC BO; CE: PL NB NH IF BO AU LI PC; FC: BO
        CA LO AL RA; IF: NH PI CA BO CE; LI: PC CE AU MP AQ; LO: CA AL FC; LR:
        MP AU RA PA; MP: AQ LI AU LR; NB: NH CE PL BR; NH: PI IF CE NB; NO:
        PI; PA: LR RA; PC: PL CE LI AQ; PI: NH NO CA IF; PL: BR NB CE PC; RA:
        AU BO FC PA LR""")
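# --- Usage sketch (added for illustration, not part of the original module) ---
# The australia instance above can be solved directly with min_conflicts.

def _map_coloring_demo():
    solution = min_conflicts(australia)
    # Every pair of neighboring regions must have received different colors
    assert all(solution[region] != solution[neighbor]
               for region in australia.variables
               for neighbor in australia.neighbors[region])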
If there is a queen there, it can't conflict with itself, so subtract 3.""" n = len(self.variables) c = self.rows[val] + self.downs[var+val] + self.ups[var-val+n-1] if assignment.get(var, None) == val: c -= 3 return c def assign(self, var, val, assignment): """Assign var, and keep track of conflicts.""" oldval = assignment.get(var, None) if val != oldval: if oldval is not None: # Remove old val if there was one self.record_conflict(assignment, var, oldval, -1) self.record_conflict(assignment, var, val, +1) CSP.assign(self, var, val, assignment) def unassign(self, var, assignment): """Remove var from assignment (if it is there) and track conflicts.""" if var in assignment: self.record_conflict(assignment, var, assignment[var], -1) CSP.unassign(self, var, assignment) def record_conflict(self, assignment, var, val, delta): """Record conflicts caused by addition or deletion of a Queen.""" n = len(self.variables) self.rows[val] += delta self.downs[var + val] += delta self.ups[var - val + n - 1] += delta def display(self, assignment): """Print the queens and the nconflicts values (for debugging).""" n = len(self.variables) for val in range(n): for var in range(n): if assignment.get(var, '') == val: ch = 'Q' elif (var + val) % 2 == 0: ch = '.' else: ch = '-' print(ch, end=' ') print(' ', end=' ') for var in range(n): if assignment.get(var, '') == val: ch = '*' else: ch = ' ' print(str(self.nconflicts(var, val, assignment)) + ch, end=' ') print() # ______________________________________________________________________________ # Sudoku def flatten(seqs): return sum(seqs, []) easy1 = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..' # noqa harder1 = '4173698.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......' # noqa _R3 = list(range(3)) _CELL = itertools.count().__next__ _BGRID = [[[[_CELL() for x in _R3] for y in _R3] for bx in _R3] for by in _R3] _BOXES = flatten([list(map(flatten, brow)) for brow in _BGRID]) _ROWS = flatten([list(map(flatten, zip(*brow))) for brow in _BGRID]) _COLS = list(zip(*_ROWS)) _NEIGHBORS = {v: set() for v in flatten(_ROWS)} for unit in map(set, _BOXES + _ROWS + _COLS): for v in unit: _NEIGHBORS[v].update(unit - {v}) class Sudoku(CSP): """A Sudoku problem. The box grid is a 3x3 array of boxes, each a 3x3 array of cells. Each cell holds a digit in 1..9. In each box, all digits are different; the same for each row and column as a 9x9 grid. >>> e = Sudoku(easy1) >>> e.display(e.infer_assignment()) . . 3 | . 2 . | 6 . . 9 . . | 3 . 5 | . . 1 . . 1 | 8 . 6 | 4 . . ------+-------+------ . . 8 | 1 . 2 | 9 . . 7 . . | . . . | . . 8 . . 6 | 7 . 8 | 2 . . ------+-------+------ . . 2 | 6 . 9 | 5 . . 8 . . | 2 . 3 | . . 9 . . 5 | . 1 . | 3 . . >>> AC3(e); e.display(e.infer_assignment()) True 4 8 3 | 9 2 1 | 6 5 7 9 6 7 | 3 4 5 | 8 2 1 2 5 1 | 8 7 6 | 4 9 3 ------+-------+------ 5 4 8 | 1 3 2 | 9 7 6 7 2 9 | 5 6 4 | 1 3 8 1 3 6 | 7 9 8 | 2 4 5 ------+-------+------ 3 7 2 | 6 8 9 | 5 1 4 8 1 4 | 2 5 3 | 7 6 9 6 9 5 | 4 1 7 | 3 8 2 >>> h = Sudoku(harder1) >>> backtracking_search(h, select_unassigned_variable=mrv, inference=forward_checking) is not None True """ # noqa R3 = _R3 Cell = _CELL bgrid = _BGRID boxes = _BOXES rows = _ROWS cols = _COLS neighbors = _NEIGHBORS def __init__(self, grid): """Build a Sudoku problem from a string representing the grid: the digits 1-9 denote a filled cell, '.' 
or '0' an empty one; other characters are ignored.""" squares = iter(re.findall(r'\d|\.', grid)) domains = {var: [ch] if ch in '123456789' else '123456789' for var, ch in zip(flatten(self.rows), squares)} for _ in squares: raise ValueError("Not a Sudoku grid", grid) # Too many squares CSP.__init__(self, None, domains, self.neighbors, different_values_constraint) def display(self, assignment): def show_box(box): return [' '.join(map(show_cell, row)) for row in box] def show_cell(cell): return str(assignment.get(cell, '.')) def abut(lines1, lines2): return list( map(' | '.join, list(zip(lines1, lines2)))) print('\n------+-------+------\n'.join( '\n'.join(reduce( abut, map(show_box, brow))) for brow in self.bgrid)) # ______________________________________________________________________________ # The Zebra Puzzle def Zebra(): """Return an instance of the Zebra Puzzle.""" Colors = 'Red Yellow Blue Green Ivory'.split() Pets = 'Dog Fox Snails Horse Zebra'.split() Drinks = 'OJ Tea Coffee Milk Water'.split() Countries = 'Englishman Spaniard Norwegian Ukranian Japanese'.split() Smokes = 'Kools Chesterfields Winston LuckyStrike Parliaments'.split() variables = Colors + Pets + Drinks + Countries + Smokes domains = {} for var in variables: domains[var] = list(range(1, 6)) domains['Norwegian'] = [1] domains['Milk'] = [3] neighbors = parse_neighbors("""Englishman: Red; Spaniard: Dog; Kools: Yellow; Chesterfields: Fox; Norwegian: Blue; Winston: Snails; LuckyStrike: OJ; Ukranian: Tea; Japanese: Parliaments; Kools: Horse; Coffee: Green; Green: Ivory""", variables) for type in [Colors, Pets, Drinks, Countries, Smokes]: for A in type: for B in type: if A != B: if B not in neighbors[A]: neighbors[A].append(B) if A not in neighbors[B]: neighbors[B].append(A) def zebra_constraint(A, a, B, b, recurse=0): same = (a == b) next_to = abs(a - b) == 1 if A == 'Englishman' and B == 'Red': return same if A == 'Spaniard' and B == 'Dog': return same if A == 'Chesterfields' and B == 'Fox': return next_to if A == 'Norwegian' and B == 'Blue': return next_to if A == 'Kools' and B == 'Yellow': return same if A == 'Winston' and B == 'Snails': return same if A == 'LuckyStrike' and B == 'OJ': return same if A == 'Ukranian' and B == 'Tea': return same if A == 'Japanese' and B == 'Parliaments': return same if A == 'Kools' and B == 'Horse': return next_to if A == 'Coffee' and B == 'Green': return same if A == 'Green' and B == 'Ivory': return a - 1 == b if recurse == 0: return zebra_constraint(B, b, A, a, 1) if ((A in Colors and B in Colors) or (A in Pets and B in Pets) or (A in Drinks and B in Drinks) or (A in Countries and B in Countries) or (A in Smokes and B in Smokes)): return not same raise Exception('error') return CSP(variables, domains, neighbors, zebra_constraint) def solve_zebra(algorithm=min_conflicts, **args): z = Zebra() ans = algorithm(z, **args) for h in range(1, 6): print('House', h, end=' ') for (var, val) in ans.items(): if val == h: print(var, end=' ') print() return ans['Zebra'], ans['Water'], z.nassigns, ans
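# ______________________________________________________________________________
# Illustrative addition (not part of the original module): make_arc_consistent
# above is left as a NotImplementedError stub, so tree_csp_solver cannot run as
# written. Below is a minimal sketch of one possible implementation, assuming
# the CSP class provides support_pruning(), prune() and constraints(A, a, B, b)
# in the way the rest of this module uses them.


def make_arc_consistent_sketch(Xj, Xk, csp):
    """Prune csp.curr_domains[Xj] down to the values that have at least one
    consistent partner value in csp.curr_domains[Xk]. Returns the surviving
    domain, so an empty (falsy) result signals failure, which is exactly how
    tree_csp_solver tests it."""
    csp.support_pruning()  # make sure curr_domains is initialised
    for val_j in csp.curr_domains[Xj][:]:
        if not any(csp.constraints(Xj, val_j, Xk, val_k)
                   for val_k in csp.curr_domains[Xk]):
            csp.prune(Xj, val_j, None)
    return csp.curr_domains[Xj]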
#!/usr/bin/env python # ''' DF-MP2 natural orbitals for the allyl radical ''' from pyscf.gto import Mole from pyscf.scf import UHF from pyscf.tools import molden from pyscf.mp.dfump2_native import DFMP2 mol = Mole() mol.atom = ''' C -1.1528 -0.1151 -0.4645 C 0.2300 -0.1171 -0.3508 C 0.9378 0.2246 0.7924 H 0.4206 0.5272 1.7055 H 2.0270 0.2021 0.8159 H -1.6484 -0.3950 -1.3937 H -1.7866 0.1687 0.3784 H 0.8086 -0.4120 -1.2337 ''' mol.basis = 'def2-TZVP' mol.spin = 1 mol.build() mf = UHF(mol).run() # MP2 natural occupation numbers and natural orbitals natocc, natorb = DFMP2(mf).make_natorbs() # store the natural orbitals in a molden file molden.from_mo(mol, 'allyl_mp2nat.molden', natorb, occ=natocc)
// // YPViewController.h // YPAVAssetResourceLoader // // Created by yiplee on 12/08/2017. // Copyright (c) 2017 yiplee. All rights reserved. // @import UIKit; @interface YPViewController : UIViewController @end
const Article = require('../models/Article');
const Comment = require('../models/Comment');

module.exports = {
    getArticles: (req, res) => {
        Article.find()
            .then((articles) => {
                let resArticles = articles.sort((a, b) => b.date - a.date);
                res.status(200)
                    .json({ message: 'Fetched articles successfully.', resArticles });
            })
            .catch((error) => {
                // A Promise catch callback receives only the rejection reason;
                // taking a second `res` parameter here would shadow the real
                // response object and make the 500 reply fail.
                res.status(500)
                    .json({ message: 'Something went wrong.', error });
            });
    },

    createArticle: (req, res) => {
        const articleObj = req.body;

        Article.create(articleObj)
            .then((article) => {
                res.status(200)
                    .json({ message: 'Article created successfully!', article });
            })
            .catch((error) => {
                res.status(500)
                    .json({ message: 'Something went wrong.', error });
            });
    },

    articleDetails: (req, res) => {
        const articleId = req.params.id;

        Article.findById(articleId)
            .then((article) => {
                Comment.find()
                    .then((comments) => {
                        comments = comments
                            .filter((ele) => ele.article == articleId)
                            .sort((a, b) => b.date - a.date);
                        res.status(200)
                            .json({ message: 'Details fetched successfully.', article, comments });
                    });
            })
            .catch((error) => {
                res.status(500)
                    .json({ message: 'Something went wrong.', error });
            });
    },

    editArticle: (req, res) => {
        const articleId = req.params.id;

        Article.findById(articleId)
            .then((article) => {
                article.title = req.body.title;
                article.content = req.body.content;
                article.imageUrl = req.body.imageUrl;

                article.save()
                    .then(() => {
                        res.status(200)
                            .json({ message: 'Article edited successfully.', article });
                    });
            })
            .catch((error) => {
                res.status(500)
                    .json({ message: 'Something went wrong.', error });
            });
    },

    deleteArticle: (req, res) => {
        const articleId = req.params.id;

        Article.findByIdAndDelete(articleId)
            .then(() => {
                Comment.deleteMany({ article: articleId })
                    .then(() => {
                        res.status(200)
                            .json({ message: 'Article deleted successfully.' });
                    });
            })
            .catch((error) => {
                res.status(500)
                    .json({ message: 'Something went wrong.', error });
            });
    }
};
# Write results to this file OUTFILE = 'runs/10KB/par-bro-iter01000.result.csv' # Source computers for the requests SOURCE = ['10.0.0.1'] # Should Bro be enabled on the source machines? SOURCE_BRO = [True] # Target machines for the requests (aka server) TARGET = ['10.0.0.2'] # Should Bro be enabled on the target machines? TARGET_BRO = [True] # Connection mode (par = parallel, seq = sequential) MODE = 'par' # Number of evaluation repetitions to run EPOCHS = 100 # Number of iterations to be run in each evaluation repetition ITER = 1000 # Size of the file to be downloaded from target (in Bytes * 10^SIZE) SIZE = 4
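# Worked example of the SIZE knob (illustration only, not part of the original
# configuration): the payload is 10**SIZE bytes, so SIZE = 4 gives
# 10**4 = 10000 B, i.e. the 10 KB that the 'runs/10KB/' prefix in OUTFILE
# refers to. A hypothetical consumer of this module might read it as:
#
#   import config
#   payload_bytes = 10 ** config.SIZE            # 10000
#   total_requests = config.EPOCHS * config.ITER # 100 * 1000 = 100000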
const express = require("express");
const cookieParser = require("cookie-parser");
const app = express();
const mongoose = require("mongoose");

const port = process.env.PORT || 9501;
const routes = require("./routes");
require("./db/conn");

app.use(express.json());
app.use(cookieParser());
app.use(routes);

app.listen(port, () => {
    console.log(`connection is set up at port : ${port}`);
});
from functools import lru_cache import logging from typing import Iterable, Optional, Union from arche.data_quality_report import DataQualityReport from arche.readers.items import Items, CollectionItems, JobItems, RawItems import arche.readers.schema as sr from arche.report import Report import arche.rules.category as category_rules import arche.rules.coverage as coverage_rules import arche.rules.duplicates as duplicate_rules import arche.rules.json_schema as schema_rules import arche.rules.metadata as metadata_rules from arche.rules.others import compare_boolean_fields, garbage_symbols import arche.rules.price as price_rules from arche.tools import api, helpers import IPython import pandas as pd class Arche: def __init__( self, source: Union[str, pd.DataFrame, RawItems], schema: Optional[sr.SchemaSource] = None, target: Optional[Union[str, pd.DataFrame]] = None, start: int = 0, count: Optional[int] = None, filters: Optional[api.Filters] = None, expand: bool = True, ): """ Args: source: a data source to validate, accepts job keys, pandas df, lists schema: a JSON schema source used to run validation target: a data source to compare with start: an item number to start reading from count: the amount of items to read from start filters: Scrapinghub filtering, see https://python-scrapinghub.readthedocs.io/en/latest/client/apidocs.html#scrapinghub.client.items.Items # noqa expand: if True, use flattened data in garbage rules, affects performance see flatten_df """ if isinstance(source, str) and target == source: raise ValueError( "'target' is equal to 'source'. Data to compare should have different sources." ) if isinstance(source, pd.DataFrame): logging.warning( "Pandas stores `NA` (missing) data differently, " "which might affect schema validation. " "Should you care, consider passing raw data in array-like types.\n" "For more details, see https://pandas.pydata.org/pandas-docs/" "stable/user_guide/gotchas.html#nan-integer-na-values-and-na-type-promotions" ) self.source = source self._schema = None self.schema_source = None if schema: self.schema = sr.get_schema(schema) self.target = target self.start = start self.count = count self.filters = filters self.expand = expand self._source_items = None self._target_items = None self.report = Report() @property def source_items(self): if not self._source_items: self._source_items = self.get_items( self.source, self.start, self.count, self.filters, self.expand ) return self._source_items @property def target_items(self): if self.target is None: return None if not self._target_items: self._target_items = self.get_items( self.target, self.start, self.count, self.filters, self.expand ) return self._target_items @property def schema(self): if not self._schema and self.schema_source: self._schema = sr.get_schema(self.schema_source) return self._schema @schema.setter def schema(self, schema_source): self.schema_source = schema_source self._schema = sr.get_schema(schema_source) @staticmethod def get_items( source: Union[str, pd.DataFrame, RawItems], start: int, count: Optional[int], filters: Optional[api.Filters], expand: bool, ) -> Union[JobItems, CollectionItems]: if isinstance(source, pd.DataFrame): return Items.from_df(source, expand=expand) elif isinstance(source, Iterable) and not isinstance(source, str): return Items.from_array(source, expand=expand) elif helpers.is_job_key(source): return JobItems( key=source, start=start, count=count, filters=filters, expand=expand ) elif helpers.is_collection_key(source): if start: raise ValueError("Collections API 
does not support 'start' parameter") return CollectionItems( key=source, count=count, filters=filters, expand=expand ) else: raise ValueError(f"'{source}' is not a valid job or collection key") def save_result(self, rule_result): self.report.save(rule_result) def report_all(self, short: bool = False) -> None: self.run_all_rules() IPython.display.clear_output() self.report.write_summaries() self.report.write("\n" * 2) self.report.write_details(short) def run_all_rules(self): if isinstance(self.source_items, JobItems): self.check_metadata(self.source_items.job) if self.target_items: self.compare_metadata(self.source_items.job, self.target_items.job) self.run_general_rules() self.run_comparison_rules() self.run_schema_rules() def data_quality_report(self, bucket: Optional[str] = None): if helpers.is_collection_key(self.source): raise ValueError("Collections are not supported") if not self.schema: raise ValueError("Schema is empty") IPython.display.clear_output() DataQualityReport(self.source_items, self.schema, self.report, bucket) @lru_cache(maxsize=32) def run_general_rules(self): self.save_result(garbage_symbols(self.source_items)) df = self.source_items.df self.save_result( coverage_rules.check_fields_coverage( df.drop(columns=df.columns[df.columns.str.startswith("_")]) ) ) self.save_result(category_rules.get_categories(df)) def validate_with_json_schema(self) -> None: """Run JSON schema check and output results. It will try to find all errors, but there are no guarantees. Slower than `check_with_json_schema()` """ res = schema_rules.validate(self.schema, self.source_items.raw) self.save_result(res) res.show() def glance(self) -> None: """Run JSON schema check and output results. In most cases it will return only the first error per item. Usable for big jobs as it's about 100x faster than `validate_with_json_schema()`. 
""" res = schema_rules.validate(self.schema, self.source_items.raw, fast=True) self.save_result(res) res.show() def run_schema_rules(self) -> None: if not self.schema: return self.save_result(schema_rules.validate(self.schema, self.source_items.raw)) tagged_fields = sr.Tags().get(self.schema) target_columns = ( self.target_items.df.columns.values if self.target_items else None ) check_tags_result = schema_rules.check_tags( self.source_items.df.columns.values, target_columns, tagged_fields ) self.save_result(check_tags_result) if check_tags_result.errors: return self.run_customized_rules(self.source_items, tagged_fields) self.compare_with_customized_rules( self.source_items, self.target_items, tagged_fields ) def run_customized_rules(self, items, tagged_fields): self.save_result(price_rules.compare_was_now(items.df, tagged_fields)) self.save_result(duplicate_rules.check_uniqueness(items.df, tagged_fields)) self.save_result(duplicate_rules.check_items(items.df, tagged_fields)) self.save_result( category_rules.get_coverage_per_category( items.df, tagged_fields.get("category", []) ) ) @lru_cache(maxsize=32) def check_metadata(self, job): self.save_result(metadata_rules.check_outcome(job)) self.save_result(metadata_rules.check_errors(job)) self.save_result(metadata_rules.check_response_ratio(job)) @lru_cache(maxsize=32) def compare_metadata(self, source_job, target_job): self.save_result(metadata_rules.compare_spider_names(source_job, target_job)) self.save_result(metadata_rules.compare_errors(source_job, target_job)) self.save_result( metadata_rules.compare_number_of_scraped_items(source_job, target_job) ) self.save_result(coverage_rules.get_difference(source_job, target_job)) self.save_result(metadata_rules.compare_response_ratio(source_job, target_job)) self.save_result(metadata_rules.compare_runtime(source_job, target_job)) self.save_result(metadata_rules.compare_finish_time(source_job, target_job)) @lru_cache(maxsize=32) def run_comparison_rules(self): if not self.target_items: return for r in [coverage_rules.compare_scraped_fields, compare_boolean_fields]: self.save_result(r(self.source_items.df, self.target_items.df)) def compare_with_customized_rules(self, source_items, target_items, tagged_fields): if not target_items: return self.save_result( category_rules.get_difference( source_items.df, target_items.df, tagged_fields.get("category", []) ) ) for r in [ price_rules.compare_prices_for_same_urls, price_rules.compare_names_for_same_urls, price_rules.compare_prices_for_same_names, ]: self.save_result(r(source_items.df, target_items.df, tagged_fields))
var searchData= [ ['batch2space',['BATCH2SPACE',['../namespacerk_1_1nn.html#a6a02b2d1d62293b20242e3dcfbdd0117a1da63a60f350a062e3bb5bb3dcd31a47',1,'rk::nn']]], ['batch_5fnorm',['BATCH_NORM',['../namespacerk_1_1nn.html#a6a02b2d1d62293b20242e3dcfbdd0117a459ac7deae89644bcd5b99c9aac222a4',1,'rk::nn']]], ['bool8',['BOOL8',['../namespacerk_1_1nn.html#a13c421245f43fd2d7edd0e94c537965da05afd9eb8887a406d47474cd3809a5dd',1,'rk::nn']]] ];
// This is a manifest file that'll be compiled into application.js, which will include all the files // listed below. // // Any JavaScript/Coffee file within this directory, lib/assets/javascripts, or any plugin's // vendor/assets/javascripts directory can be referenced here using a relative path. // // It's not advisable to add code directly here, but if you do, it'll appear at the bottom of the // compiled file. JavaScript code in this file should be added after the last require_* statement. // // Read Sprockets README (https://github.com/rails/sprockets#sprockets-directives) for details // about supported directives. // //= require jquery //= require rails-ujs //= require jquery-ui //= require bootstrap //= require turbolinks //= require_tree .
import asyncio import random from pathlib import Path import sqlite3 import aiosqlite import pytest from src.full_node.block_store import BlockStore from src.consensus.blockchain import Blockchain from src.full_node.coin_store import CoinStore from tests.setup_nodes import test_constants, bt @pytest.fixture(scope="module") def event_loop(): loop = asyncio.get_event_loop() yield loop class TestBlockStore: @pytest.mark.asyncio async def test_block_store(self): assert sqlite3.threadsafety == 1 blocks = bt.get_consecutive_blocks(10) db_filename = Path("blockchain_test.db") db_filename_2 = Path("blockchain_test2.db") if db_filename.exists(): db_filename.unlink() if db_filename_2.exists(): db_filename_2.unlink() connection = await aiosqlite.connect(db_filename) connection_2 = await aiosqlite.connect(db_filename_2) # Use a different file for the blockchain coin_store_2 = await CoinStore.create(connection_2) store_2 = await BlockStore.create(connection_2) bc = await Blockchain.create(coin_store_2, store_2, test_constants) store = await BlockStore.create(connection) await BlockStore.create(connection_2) try: # Save/get block for block in blocks: await bc.receive_block(block) sub_block = bc.sub_block_record(block.header_hash) sub_block_hh = sub_block.header_hash await store.add_full_block(block, sub_block) await store.add_full_block(block, sub_block) assert block == await store.get_full_block(block.header_hash) assert block == await store.get_full_block(block.header_hash) assert sub_block == (await store.get_sub_block_record(sub_block_hh)) await store.set_peak(sub_block.header_hash) await store.set_peak(sub_block.header_hash) assert len(await store.get_full_blocks_at([1])) == 1 assert len(await store.get_full_blocks_at([0])) == 1 assert len(await store.get_full_blocks_at([100])) == 0 # Get sub blocks sub_block_records = await store.get_sub_block_records() assert len(sub_block_records[0]) == len(blocks) # Peak is correct assert sub_block_records[1] == blocks[-1].header_hash except Exception: await connection.close() await connection_2.close() db_filename.unlink() db_filename_2.unlink() raise await connection.close() await connection_2.close() db_filename.unlink() db_filename_2.unlink() @pytest.mark.asyncio async def test_deadlock(self): """ This test was added because the store was deadlocking in certain situations, when fetching and adding blocks repeatedly. The issue was patched. """ blocks = bt.get_consecutive_blocks(10) db_filename = Path("blockchain_test.db") db_filename_2 = Path("blockchain_test2.db") if db_filename.exists(): db_filename.unlink() if db_filename_2.exists(): db_filename_2.unlink() connection = await aiosqlite.connect(db_filename) connection_2 = await aiosqlite.connect(db_filename_2) store = await BlockStore.create(connection) coin_store_2 = await CoinStore.create(connection_2) store_2 = await BlockStore.create(connection_2) bc = await Blockchain.create(coin_store_2, store_2, test_constants) sub_block_records = [] for block in blocks: await bc.receive_block(block) sub_block_records.append(bc.sub_block_record(block.header_hash)) tasks = [] for i in range(10000): rand_i = random.randint(0, 9) if random.random() < 0.5: tasks.append(asyncio.create_task(store.add_full_block(blocks[rand_i], sub_block_records[rand_i]))) if random.random() < 0.5: tasks.append(asyncio.create_task(store.get_full_block(blocks[rand_i].header_hash))) await asyncio.gather(*tasks) await connection.close() await connection_2.close() db_filename.unlink() db_filename_2.unlink()
/*! * (C) Ionic http://ionicframework.com - MIT License * Built with http://stenciljs.com */ import*as tslib_1 from"../polyfills/tslib.js";import{h}from"../ionic.core.js";import{a as rIC,k as isPlatform}from"./chunk-c23403d0.js";import{h as createColorClasses,j as hostContext,k as createThemedClasses}from"./chunk-b9ec67ac.js";var App=function(){function t(){}return t.prototype.componentDidLoad=function(){var t=this;rIC(function(){var e=t.win,o=t.config,r=t.queue;o.getBoolean("_testing")||importTapClick(e),importInputShims(e,o),importStatusTap(e,o,r),importHardwareBackButton(e,o)})},t.prototype.hostData=function(){return{class:{"ion-page":!0,"force-statusbar-padding":this.config.getBoolean("_forceStatusbarPadding")}}},Object.defineProperty(t,"is",{get:function(){return"ion-app"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"properties",{get:function(){return{config:{context:"config"},el:{elementRef:!0},queue:{context:"queue"},win:{context:"window"}}},enumerable:!0,configurable:!0}),Object.defineProperty(t,"style",{get:function(){return"html.plt-mobile ion-app{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}ion-app.force-statusbar-padding{--ion-safe-area-top:20px}"},enumerable:!0,configurable:!0}),t}();function importHardwareBackButton(t,e){e.getBoolean("hardwareBackButton",isPlatform(t,"hybrid"))&&import("./hardware-back-button.js").then(function(e){return e.startHardwareBackButton(t)})}function importStatusTap(t,e,o){e.getBoolean("statusTap",isPlatform(t,"hybrid"))&&import("./status-tap.js").then(function(e){return e.startStatusTap(t,o)})}function importTapClick(t){import("./tap-click.js").then(function(e){return e.startTapClick(t.document)})}function importInputShims(t,e){e.getBoolean("inputShims",needInputShims(t))&&import("./input-shims.js").then(function(o){return o.startInputShims(t.document,e)})}function needInputShims(t){return isPlatform(t,"ios")&&isPlatform(t,"mobile")}var Buttons=function(){function t(){}return Object.defineProperty(t,"is",{get:function(){return"ion-buttons"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"encapsulation",{get:function(){return"scoped"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"style",{get:function(){return".sc-ion-buttons-ios-h{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;-webkit-transform:translateZ(0);transform:translateZ(0);z-index:99}.sc-ion-buttons-ios-s ion-button {--margin-top:0;--margin-bottom:0;--margin-start:0;--margin-end:0;--padding-start:0;--padding-end:0;--box-shadow:none;margin-left:2px;margin-right:2px;--padding-top:0;--padding-bottom:0;--padding-start:5px;--padding-end:5px;--height:32px;font-size:17px;font-weight:400}.sc-ion-buttons-ios-s ion-button:not(.button-round) {--border-radius:4px}.sc-ion-buttons-ios-h.ion-color.sc-ion-buttons-ios-s .button , .ion-color .sc-ion-buttons-ios-h.sc-ion-buttons-ios-s .button {--color:initial;--color-activated:initial}\@media (any-hover:hover){.sc-ion-buttons-ios-s .button-solid-ios:hover {opacity:.4}}.sc-ion-buttons-ios-s ion-icon[slot=start] {margin:0;margin-right:.3em;font-size:24px;line-height:.67}.sc-ion-buttons-ios-s ion-icon[slot=end] {margin:0;margin-left:.4em;font-size:24px;line-height:.67}.sc-ion-buttons-ios-s ion-icon[slot=icon-only] 
{padding:0;margin:0;font-size:31px;line-height:.67}[slot=start].sc-ion-buttons-ios-h{-ms-flex-order:2;order:2}[slot=secondary].sc-ion-buttons-ios-h{-ms-flex-order:3;order:3}[slot=primary].sc-ion-buttons-ios-h{-ms-flex-order:5;order:5;text-align:end}[slot=end].sc-ion-buttons-ios-h{-ms-flex-order:6;order:6;text-align:end}"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"styleMode",{get:function(){return"ios"},enumerable:!0,configurable:!0}),t}(),Content=function(){function t(){this.isScrolling=!1,this.lastScroll=0,this.queued=!1,this.cTop=-1,this.cBottom=-1,this.detail={scrollTop:0,scrollLeft:0,type:"scroll",event:void 0,startX:0,startY:0,startTimeStamp:0,currentX:0,currentY:0,velocityX:0,velocityY:0,deltaX:0,deltaY:0,timeStamp:0,data:void 0,isScrolling:!0},this.fullscreen=!1,this.scrollX=!1,this.scrollY=!0,this.scrollEvents=!1}return t.prototype.onNavChanged=function(){this.resize()},t.prototype.componentWillLoad=function(){void 0===this.forceOverscroll&&(this.forceOverscroll="ios"===this.mode&&isPlatform(this.win,"mobile"))},t.prototype.componentDidLoad=function(){this.resize()},t.prototype.componentDidUnload=function(){this.watchDog&&clearInterval(this.watchDog)},t.prototype.resize=function(){this.fullscreen?this.queue.read(this.readDimensions.bind(this)):0===this.cTop&&0===this.cBottom||(this.cTop=this.cBottom=0,this.el.forceUpdate())},t.prototype.readDimensions=function(){var t=getPageElement(this.el),e=Math.max(this.el.offsetTop,0),o=Math.max(t.offsetHeight-e-this.el.offsetHeight,0);(e!==this.cTop||o!==this.cBottom)&&(this.cTop=e,this.cBottom=o,this.el.forceUpdate())},t.prototype.onScroll=function(t){var e=this,o=Date.now(),r=!this.isScrolling;this.lastScroll=o,r&&this.onScrollStart(),!this.queued&&this.scrollEvents&&(this.queued=!0,this.queue.read(function(o){e.queued=!1,e.detail.event=t,updateScrollDetail(e.detail,e.scrollEl,o,r),e.ionScroll.emit(e.detail)}))},t.prototype.getScrollElement=function(){return Promise.resolve(this.scrollEl)},t.prototype.scrollToTop=function(t){return void 0===t&&(t=0),this.scrollToPoint(void 0,0,t)},t.prototype.scrollToBottom=function(t){return void 0===t&&(t=0),this.scrollToPoint(void 0,this.scrollEl.scrollHeight-this.scrollEl.clientHeight,t)},t.prototype.scrollByPoint=function(t,e,o){return this.scrollToPoint(t+this.scrollEl.scrollLeft,e+this.scrollEl.scrollTop,o)},t.prototype.scrollToPoint=function(t,e,o){return void 0===o&&(o=0),tslib_1.__awaiter(this,void 0,void 0,function(){var r,n,l,i,c,s,a,u,p;return tslib_1.__generator(this,function(f){return r=this.scrollEl,o<32?(null!=e&&(r.scrollTop=e),null!=t&&(r.scrollLeft=t),[2]):(l=0,i=new Promise(function(t){return n=t}),c=r.scrollTop,s=r.scrollLeft,a=null!=e?e-c:0,u=null!=t?t-s:0,p=function(t){var e=Math.min(1,(t-l)/o)-1,i=Math.pow(e,3)+1;0!==a&&(r.scrollTop=Math.floor(i*a+c)),0!==u&&(r.scrollLeft=Math.floor(i*u+s)),i<1?requestAnimationFrame(p):n()},requestAnimationFrame(function(t){l=t,p(t)}),[2,i])})})},t.prototype.onScrollStart=function(){var 
t=this;this.isScrolling=!0,this.ionScrollStart.emit({isScrolling:!0}),this.watchDog&&clearInterval(this.watchDog),this.watchDog=setInterval(function(){t.lastScroll<Date.now()-120&&t.onScrollEnd()},100)},t.prototype.onScrollEnd=function(){clearInterval(this.watchDog),this.watchDog=null,this.isScrolling=!1,this.ionScrollEnd.emit({isScrolling:!1})},t.prototype.hostData=function(){return{class:Object.assign({},createColorClasses(this.color),{"content-sizing":hostContext("ion-popover",this.el),overscroll:!!this.forceOverscroll}),style:{"--offset-top":this.cTop+"px","--offset-bottom":this.cBottom+"px"}}},t.prototype.render=function(){var t=this,e=this.scrollX,o=this.scrollY,r=this.forceOverscroll;return this.resize(),[h("div",{class:{"inner-scroll":!0,"scroll-x":e,"scroll-y":o,overscroll:(e||o)&&!!r},ref:function(e){return t.scrollEl=e},onScroll:function(e){return t.onScroll(e)}},h("slot",null)),h("slot",{name:"fixed"})]},Object.defineProperty(t,"is",{get:function(){return"ion-content"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"encapsulation",{get:function(){return"shadow"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"properties",{get:function(){return{color:{type:String,attr:"color"},config:{context:"config"},el:{elementRef:!0},forceOverscroll:{type:Boolean,attr:"force-overscroll",mutable:!0},fullscreen:{type:Boolean,attr:"fullscreen"},getScrollElement:{method:!0},queue:{context:"queue"},scrollByPoint:{method:!0},scrollEvents:{type:Boolean,attr:"scroll-events"},scrollToBottom:{method:!0},scrollToPoint:{method:!0},scrollToTop:{method:!0},scrollX:{type:Boolean,attr:"scroll-x"},scrollY:{type:Boolean,attr:"scroll-y"},win:{context:"window"}}},enumerable:!0,configurable:!0}),Object.defineProperty(t,"events",{get:function(){return[{name:"ionScrollStart",method:"ionScrollStart",bubbles:!0,cancelable:!0,composed:!0},{name:"ionScroll",method:"ionScroll",bubbles:!0,cancelable:!0,composed:!0},{name:"ionScrollEnd",method:"ionScrollEnd",bubbles:!0,cancelable:!0,composed:!0}]},enumerable:!0,configurable:!0}),Object.defineProperty(t,"listeners",{get:function(){return[{name:"body:ionNavDidChange",method:"onNavChanged"}]},enumerable:!0,configurable:!0}),Object.defineProperty(t,"style",{get:function(){return".sc-ion-content-h{--background:var(--ion-background-color,#fff);--color:var(--ion-text-color,#000);--padding-top:0px;--padding-bottom:0px;--padding-start:0px;--padding-end:0px;--keyboard-offset:0px;--offset-top:0px;--offset-bottom:0px;--overflow:auto;display:block;position:relative;-ms-flex:1;flex:1;width:100%;height:100%;margin:0!important;padding:0!important;font-family:var(--ion-font-family,inherit);contain:layout size style}.ion-color.sc-ion-content-h .inner-scroll.sc-ion-content{background:var(--ion-color-base);color:var(--ion-color-contrast)}.outer-content.sc-ion-content-h{--background:var(--ion-background-color-step-50,#f2f2f2)}.inner-scroll.sc-ion-content{left:0;right:0;top:calc(var(--offset-top) * -1);bottom:calc(var(--offset-bottom) * -1);padding:calc(var(--padding-top) + var(--offset-top)) var(--padding-end) calc(var(--padding-bottom) + var(--keyboard-offset) + var(--offset-bottom)) var(--padding-start);position:absolute;background:var(--background);color:var(--color);-webkit-box-sizing:border-box;box-sizing:border-box;overflow:hidden}.scroll-x.sc-ion-content, 
.scroll-y.sc-ion-content{-webkit-overflow-scrolling:touch;will-change:scroll-position;-ms-scroll-chaining:none;overscroll-behavior:contain}.scroll-y.sc-ion-content{overflow-y:var(--overflow)}.scroll-x.sc-ion-content{overflow-x:var(--overflow)}.overscroll.sc-ion-content:after, .overscroll.sc-ion-content:before{position:absolute;width:1px;height:1px;content:\"\"}.overscroll.sc-ion-content:before{bottom:-1px}.overscroll.sc-ion-content:after{top:-1px}.content-sizing.sc-ion-content-h{contain:none}.content-sizing.sc-ion-content-h .inner-scroll.sc-ion-content{position:relative}"},enumerable:!0,configurable:!0}),t}();function getParentElement(t){return t.parentElement?t.parentElement:t.parentNode&&t.parentNode.host?t.parentNode.host:null}function getPageElement(t){var e=t.closest("ion-tabs");return e||(t.closest("ion-app,ion-page,.ion-page,page-inner")||getParentElement(t))}function updateScrollDetail(t,e,o,r){var n=t.currentX,l=t.currentY,i=t.timeStamp,c=e.scrollLeft,s=e.scrollTop;r&&(t.startTimeStamp=o,t.startX=c,t.startY=s,t.velocityX=t.velocityY=0),t.timeStamp=o,t.currentX=t.scrollLeft=c,t.currentY=t.scrollTop=s,t.deltaX=c-t.startX,t.deltaY=s-t.startY;var a=o-i;if(a>0&&a<100){var u=(s-l)/a;t.velocityX=(c-n)/a*.7+.3*t.velocityX,t.velocityY=.7*u+.3*t.velocityY}}var Footer=function(){function t(){this.translucent=!1}return t.prototype.hostData=function(){var t=createThemedClasses(this.mode,"footer"),e=this.translucent?createThemedClasses(this.mode,"footer-translucent"):null;return{class:Object.assign({},t,e)}},Object.defineProperty(t,"is",{get:function(){return"ion-footer"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"properties",{get:function(){return{mode:{type:String,attr:"mode"},translucent:{type:Boolean,attr:"translucent"}}},enumerable:!0,configurable:!0}),Object.defineProperty(t,"style",{get:function(){return"ion-footer{display:block;position:relative;-ms-flex-order:1;order:1;width:100%;z-index:10}ion-footer ion-toolbar:last-child{padding-bottom:var(--ion-safe-area-bottom,0)}.footer-ios ion-toolbar:first-child{--border-width:0.55px 0 0}.footer-ios[no-border] ion-toolbar:first-child{--border-width:0}.footer-translucent-ios{-webkit-backdrop-filter:saturate(180%) blur(20px);backdrop-filter:saturate(180%) blur(20px)}.footer-translucent-ios ion-toolbar{--opacity:.8;--backdrop-filter:saturate(180%) blur(20px)}"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"styleMode",{get:function(){return"ios"},enumerable:!0,configurable:!0}),t}(),Header=function(){function t(){this.translucent=!1}return t.prototype.hostData=function(){var t=createThemedClasses(this.mode,"header"),e=this.translucent?createThemedClasses(this.mode,"header-translucent"):null;return{class:Object.assign({},t,e)}},Object.defineProperty(t,"is",{get:function(){return"ion-header"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"properties",{get:function(){return{mode:{type:String,attr:"mode"},translucent:{type:Boolean,attr:"translucent"}}},enumerable:!0,configurable:!0}),Object.defineProperty(t,"style",{get:function(){return"ion-header{display:block;position:relative;-ms-flex-order:-1;order:-1;width:100%;z-index:10}ion-header ion-toolbar:first-child{padding-top:var(--ion-safe-area-top,0)}.header-ios ion-toolbar:last-child{--border-width:0 0 0.55px}.header-ios[no-border] ion-toolbar:last-child{--border-width:0}.header-translucent-ios{-webkit-backdrop-filter:saturate(180%) blur(20px);backdrop-filter:saturate(180%) blur(20px)}.header-translucent-ios ion-toolbar{--opacity:.8;--backdrop-filter:saturate(180%) 
blur(20px)}"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"styleMode",{get:function(){return"ios"},enumerable:!0,configurable:!0}),t}(),ToolbarTitle=function(){function t(){}return t.prototype.getMode=function(){var t=this.el.closest("ion-toolbar");return t&&t.mode||this.mode},t.prototype.hostData=function(){var t,e=this.getMode();return{class:Object.assign({},createColorClasses(this.color),(t={},t["title-"+e]=!0,t))}},t.prototype.render=function(){return[h("div",{class:"toolbar-title"},h("slot",null))]},Object.defineProperty(t,"is",{get:function(){return"ion-title"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"encapsulation",{get:function(){return"shadow"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"properties",{get:function(){return{color:{type:String,attr:"color"},el:{elementRef:!0}}},enumerable:!0,configurable:!0}),Object.defineProperty(t,"style",{get:function(){return".sc-ion-title-h{--color:initial;display:-ms-flexbox;display:flex;-ms-flex:1;flex:1;-ms-flex-align:center;align-items:center;color:var(--color)}.sc-ion-title-h, .title-ios.sc-ion-title-h{-webkit-transform:translateZ(0);transform:translateZ(0)}.title-ios.sc-ion-title-h{left:0;top:0;padding:0 90px;position:absolute;width:100%;height:100%;font-size:17px;font-weight:600;letter-spacing:-.03em;text-align:center;-webkit-box-sizing:border-box;box-sizing:border-box;pointer-events:none}.title-md.sc-ion-title-h{padding:0 12px;font-size:20px;font-weight:500}.ion-color.sc-ion-title-h{color:var(--ion-color-base)}.toolbar-title.sc-ion-title{display:block;width:100%;text-overflow:ellipsis;white-space:nowrap;overflow:hidden;pointer-events:auto}"},enumerable:!0,configurable:!0}),t}(),Toolbar=function(){function t(){}return t.prototype.hostData=function(){return{class:createColorClasses(this.color)}},t.prototype.render=function(){return[h("div",{class:"toolbar-background"}),h("div",{class:"toolbar-container"},h("slot",{name:"start"}),h("slot",{name:"secondary"}),h("div",{class:"toolbar-content"},h("slot",null)),h("slot",{name:"primary"}),h("slot",{name:"end"}))]},Object.defineProperty(t,"is",{get:function(){return"ion-toolbar"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"encapsulation",{get:function(){return"shadow"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"properties",{get:function(){return{color:{type:String,attr:"color"},config:{context:"config"},mode:{type:String,attr:"mode"}}},enumerable:!0,configurable:!0}),Object.defineProperty(t,"style",{get:function(){return".sc-ion-toolbar-ios-h{--border-width:0;--border-style:solid;--opacity:1;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;padding-left:var(--ion-safe-area-left);padding-right:var(--ion-safe-area-right);display:block;position:relative;width:100%;color:var(--color);font-family:var(--ion-font-family,inherit);contain:content;z-index:10;-webkit-box-sizing:border-box;box-sizing:border-box}.ion-color.sc-ion-toolbar-ios-h{color:var(--ion-color-contrast)}.ion-color.sc-ion-toolbar-ios-h .toolbar-background.sc-ion-toolbar-ios{background:var(--ion-color-base)}.toolbar-container.sc-ion-toolbar-ios{padding:var(--padding-top) var(--padding-end) var(--padding-bottom) 
var(--padding-start);display:-ms-flexbox;display:flex;position:relative;-ms-flex-direction:row;flex-direction:row;-ms-flex-align:center;align-items:center;-ms-flex-pack:justify;justify-content:space-between;width:100%;min-height:var(--min-height);contain:content;overflow:hidden;z-index:10;-webkit-box-sizing:border-box;box-sizing:border-box}.toolbar-background.sc-ion-toolbar-ios{left:0;right:0;top:0;bottom:0;position:absolute;-webkit-transform:translateZ(0);transform:translateZ(0);border-width:var(--border-width);border-style:var(--border-style);border-color:var(--border-color);background:var(--background);contain:strict;opacity:var(--opacity);z-index:-1;pointer-events:none}.sc-ion-toolbar-ios-h{--background:var(--ion-toolbar-background,#f8f8f8);--color:var(--ion-toolbar-color,var(--ion-text-color,#000));--border-color:var(--ion-toolbar-border-color,var(--ion-border-color,rgba(0,0,0,0.2)));--padding-top:4px;--padding-bottom:4px;--padding-start:4px;--padding-end:4px;--min-height:44px}.toolbar-content.sc-ion-toolbar-ios{-ms-flex:1;flex:1;-ms-flex-order:4;order:4;min-width:0}"},enumerable:!0,configurable:!0}),Object.defineProperty(t,"styleMode",{get:function(){return"ios"},enumerable:!0,configurable:!0}),t}();export{App as IonApp,Buttons as IonButtons,Content as IonContent,Footer as IonFooter,Header as IonHeader,ToolbarTitle as IonTitle,Toolbar as IonToolbar};
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "nssrwlk.h" #include "nspr.h" PR_BEGIN_EXTERN_C /* * Reader-writer lock */ struct nssRWLockStr { PZLock * rw_lock; char * rw_name; /* lock name */ PRUint32 rw_rank; /* rank of the lock */ PRInt32 rw_writer_locks; /* == 0, if unlocked */ PRInt32 rw_reader_locks; /* == 0, if unlocked */ /* > 0 , # of read locks */ PRUint32 rw_waiting_readers; /* number of waiting readers */ PRUint32 rw_waiting_writers; /* number of waiting writers */ PZCondVar * rw_reader_waitq; /* cvar for readers */ PZCondVar * rw_writer_waitq; /* cvar for writers */ PRThread * rw_owner; /* lock owner for write-lock */ /* Non-null if write lock held. */ }; PR_END_EXTERN_C #include <string.h> #ifdef DEBUG_RANK_ORDER #define NSS_RWLOCK_RANK_ORDER_DEBUG /* enable deadlock detection using rank-order for locks */ #endif #ifdef NSS_RWLOCK_RANK_ORDER_DEBUG static PRUintn nss_thread_rwlock_initialized; static PRUintn nss_thread_rwlock; /* TPD key for lock stack */ static PRUintn nss_thread_rwlock_alloc_failed; #define _NSS_RWLOCK_RANK_ORDER_LIMIT 10 typedef struct thread_rwlock_stack { PRInt32 trs_index; /* top of stack */ NSSRWLock *trs_stack[_NSS_RWLOCK_RANK_ORDER_LIMIT]; /* stack of lock pointers */ } thread_rwlock_stack; /* forward static declarations. */ static PRUint32 nssRWLock_GetThreadRank(PRThread *me); static void nssRWLock_SetThreadRank(PRThread *me, NSSRWLock *rwlock); static void nssRWLock_UnsetThreadRank(PRThread *me, NSSRWLock *rwlock); static void nssRWLock_ReleaseLockStack(void *lock_stack); #endif #define UNTIL(x) while(!(x)) /* * Reader/Writer Locks */ /* * NSSRWLock_New * Create a reader-writer lock, with the given lock rank and lock name * */ NSSRWLock * NSSRWLock_New(PRUint32 lock_rank, const char *lock_name) { NSSRWLock *rwlock; rwlock = PR_NEWZAP(NSSRWLock); if (rwlock == NULL) return NULL; rwlock->rw_lock = PZ_NewLock(nssILockRWLock); if (rwlock->rw_lock == NULL) { goto loser; } rwlock->rw_reader_waitq = PZ_NewCondVar(rwlock->rw_lock); if (rwlock->rw_reader_waitq == NULL) { goto loser; } rwlock->rw_writer_waitq = PZ_NewCondVar(rwlock->rw_lock); if (rwlock->rw_writer_waitq == NULL) { goto loser; } if (lock_name != NULL) { rwlock->rw_name = (char*) PR_Malloc(strlen(lock_name) + 1); if (rwlock->rw_name == NULL) { goto loser; } strcpy(rwlock->rw_name, lock_name); } else { rwlock->rw_name = NULL; } rwlock->rw_rank = lock_rank; rwlock->rw_waiting_readers = 0; rwlock->rw_waiting_writers = 0; rwlock->rw_reader_locks = 0; rwlock->rw_writer_locks = 0; return rwlock; loser: NSSRWLock_Destroy(rwlock); return(NULL); } /* ** Destroy the given RWLock "lock". */ void NSSRWLock_Destroy(NSSRWLock *rwlock) { PR_ASSERT(rwlock != NULL); PR_ASSERT(rwlock->rw_waiting_readers == 0); /* XXX Shouldn't we lock the PZLock before destroying this?? */ if (rwlock->rw_name) PR_Free(rwlock->rw_name); if (rwlock->rw_reader_waitq) PZ_DestroyCondVar(rwlock->rw_reader_waitq); if (rwlock->rw_writer_waitq) PZ_DestroyCondVar(rwlock->rw_writer_waitq); if (rwlock->rw_lock) PZ_DestroyLock(rwlock->rw_lock); PR_DELETE(rwlock); } /* ** Read-lock the RWLock. 
*/ void NSSRWLock_LockRead(NSSRWLock *rwlock) { PRThread *me = PR_GetCurrentThread(); PZ_Lock(rwlock->rw_lock); #ifdef NSS_RWLOCK_RANK_ORDER_DEBUG /* * assert that rank ordering is not violated; the rank of 'rwlock' should * be equal to or greater than the highest rank of all the locks held by * the thread. */ PR_ASSERT((rwlock->rw_rank == NSS_RWLOCK_RANK_NONE) || (rwlock->rw_rank >= nssRWLock_GetThreadRank(me))); #endif /* * wait if write-locked or if a writer is waiting; preference for writers */ UNTIL ( (rwlock->rw_owner == me) || /* I own it, or */ ((rwlock->rw_owner == NULL) && /* no-one owns it, and */ (rwlock->rw_waiting_writers == 0))) { /* no-one is waiting to own */ rwlock->rw_waiting_readers++; PZ_WaitCondVar(rwlock->rw_reader_waitq, PR_INTERVAL_NO_TIMEOUT); rwlock->rw_waiting_readers--; } rwlock->rw_reader_locks++; /* Increment read-lock count */ PZ_Unlock(rwlock->rw_lock); #ifdef NSS_RWLOCK_RANK_ORDER_DEBUG nssRWLock_SetThreadRank(me, rwlock);/* update thread's lock rank */ #endif } /* Unlock a Read lock held on this RW lock. */ void NSSRWLock_UnlockRead(NSSRWLock *rwlock) { PZ_Lock(rwlock->rw_lock); PR_ASSERT(rwlock->rw_reader_locks > 0); /* lock must be read locked */ if (( rwlock->rw_reader_locks > 0) && /* caller isn't screwey */ (--rwlock->rw_reader_locks == 0) && /* not read locked any more */ ( rwlock->rw_owner == NULL) && /* not write locked */ ( rwlock->rw_waiting_writers > 0)) { /* someone's waiting. */ PZ_NotifyCondVar(rwlock->rw_writer_waitq); /* wake him up. */ } PZ_Unlock(rwlock->rw_lock); #ifdef NSS_RWLOCK_RANK_ORDER_DEBUG /* * update thread's lock rank */ nssRWLock_UnsetThreadRank(me, rwlock); #endif return; } /* ** Write-lock the RWLock. */ void NSSRWLock_LockWrite(NSSRWLock *rwlock) { PRThread *me = PR_GetCurrentThread(); PZ_Lock(rwlock->rw_lock); #ifdef NSS_RWLOCK_RANK_ORDER_DEBUG /* * assert that rank ordering is not violated; the rank of 'rwlock' should * be equal to or greater than the highest rank of all the locks held by * the thread. */ PR_ASSERT((rwlock->rw_rank == NSS_RWLOCK_RANK_NONE) || (rwlock->rw_rank >= nssRWLock_GetThreadRank(me))); #endif /* * wait if read locked or write locked. */ PR_ASSERT(rwlock->rw_reader_locks >= 0); PR_ASSERT(me != NULL); UNTIL ( (rwlock->rw_owner == me) || /* I own write lock, or */ ((rwlock->rw_owner == NULL) && /* no writer and */ (rwlock->rw_reader_locks == 0))) { /* no readers, either. */ rwlock->rw_waiting_writers++; PZ_WaitCondVar(rwlock->rw_writer_waitq, PR_INTERVAL_NO_TIMEOUT); rwlock->rw_waiting_writers--; PR_ASSERT(rwlock->rw_reader_locks >= 0); } PR_ASSERT(rwlock->rw_reader_locks == 0); /* * apply write lock */ rwlock->rw_owner = me; rwlock->rw_writer_locks++; /* Increment write-lock count */ PZ_Unlock(rwlock->rw_lock); #ifdef NSS_RWLOCK_RANK_ORDER_DEBUG /* * update thread's lock rank */ nssRWLock_SetThreadRank(me,rwlock); #endif } /* Unlock a Read lock held on this RW lock. */ void NSSRWLock_UnlockWrite(NSSRWLock *rwlock) { PRThread *me = PR_GetCurrentThread(); PZ_Lock(rwlock->rw_lock); PR_ASSERT(rwlock->rw_owner == me); /* lock must be write-locked by me. */ PR_ASSERT(rwlock->rw_writer_locks > 0); /* lock must be write locked */ if ( rwlock->rw_owner == me && /* I own it, and */ rwlock->rw_writer_locks > 0 && /* I own it, and */ --rwlock->rw_writer_locks == 0) { /* I'm all done with it */ rwlock->rw_owner = NULL; /* I don't own it any more. */ /* Give preference to waiting writers. 
*/ if (rwlock->rw_waiting_writers > 0) { if (rwlock->rw_reader_locks == 0) PZ_NotifyCondVar(rwlock->rw_writer_waitq); } else if (rwlock->rw_waiting_readers > 0) { PZ_NotifyAllCondVar(rwlock->rw_reader_waitq); } } PZ_Unlock(rwlock->rw_lock); #ifdef NSS_RWLOCK_RANK_ORDER_DEBUG /* * update thread's lock rank */ nssRWLock_UnsetThreadRank(me, rwlock); #endif return; } /* This is primarily for debugging, i.e. for inclusion in ASSERT calls. */ PRBool NSSRWLock_HaveWriteLock(NSSRWLock *rwlock) { PRBool ownWriteLock; PRThread *me = PR_GetCurrentThread(); /* This lock call isn't really necessary. ** If this thread is the owner, that fact cannot change during this call, ** because this thread is in this call. ** If this thread is NOT the owner, the owner could change, but it ** could not become this thread. */ #if UNNECESSARY PZ_Lock(rwlock->rw_lock); #endif ownWriteLock = (PRBool)(me == rwlock->rw_owner); #if UNNECESSARY PZ_Unlock(rwlock->rw_lock); #endif return ownWriteLock; } #ifdef NSS_RWLOCK_RANK_ORDER_DEBUG /* * nssRWLock_SetThreadRank * Set a thread's lock rank, which is the highest of the ranks of all * the locks held by the thread. Pointers to the locks are added to a * per-thread list, which is anchored off a thread-private data key. */ static void nssRWLock_SetThreadRank(PRThread *me, NSSRWLock *rwlock) { thread_rwlock_stack *lock_stack; PRStatus rv; /* * allocated thread-private-data for rwlock list, if not already allocated */ if (!nss_thread_rwlock_initialized) { /* * allocate tpd, only if not failed already */ if (!nss_thread_rwlock_alloc_failed) { if (PR_NewThreadPrivateIndex(&nss_thread_rwlock, nssRWLock_ReleaseLockStack) == PR_FAILURE) { nss_thread_rwlock_alloc_failed = 1; return; } } else return; } /* * allocate a lock stack */ if ((lock_stack = PR_GetThreadPrivate(nss_thread_rwlock)) == NULL) { lock_stack = (thread_rwlock_stack *) PR_CALLOC(1 * sizeof(thread_rwlock_stack)); if (lock_stack) { rv = PR_SetThreadPrivate(nss_thread_rwlock, lock_stack); if (rv == PR_FAILURE) { PR_DELETE(lock_stack); nss_thread_rwlock_alloc_failed = 1; return; } } else { nss_thread_rwlock_alloc_failed = 1; return; } } /* * add rwlock to lock stack, if limit is not exceeded */ if (lock_stack) { if (lock_stack->trs_index < _NSS_RWLOCK_RANK_ORDER_LIMIT) lock_stack->trs_stack[lock_stack->trs_index++] = rwlock; } nss_thread_rwlock_initialized = 1; } static void nssRWLock_ReleaseLockStack(void *lock_stack) { PR_ASSERT(lock_stack); PR_DELETE(lock_stack); } /* * nssRWLock_GetThreadRank * * return thread's lock rank. If thread-private-data for the lock * stack is not allocated, return NSS_RWLOCK_RANK_NONE. */ static PRUint32 nssRWLock_GetThreadRank(PRThread *me) { thread_rwlock_stack *lock_stack; if (nss_thread_rwlock_initialized) { if ((lock_stack = PR_GetThreadPrivate(nss_thread_rwlock)) == NULL) return (NSS_RWLOCK_RANK_NONE); else return(lock_stack->trs_stack[lock_stack->trs_index - 1]->rw_rank); } else return (NSS_RWLOCK_RANK_NONE); } /* * nssRWLock_UnsetThreadRank * * remove the rwlock from the lock stack. Since locks may not be * unlocked in a FIFO order, the entire lock stack is searched. 
*/ static void nssRWLock_UnsetThreadRank(PRThread *me, NSSRWLock *rwlock) { thread_rwlock_stack *lock_stack; int new_index = 0, index, done = 0; if (!nss_thread_rwlock_initialized) return; lock_stack = PR_GetThreadPrivate(nss_thread_rwlock); PR_ASSERT(lock_stack != NULL); index = lock_stack->trs_index - 1; while (index-- >= 0) { if ((lock_stack->trs_stack[index] == rwlock) && !done) { /* * reset the slot for rwlock */ lock_stack->trs_stack[index] = NULL; done = 1; } /* * search for the lowest-numbered empty slot, above which there are * no non-empty slots */ if ((lock_stack->trs_stack[index] != NULL) && !new_index) new_index = index + 1; if (done && new_index) break; } /* * set top of stack to highest numbered empty slot */ lock_stack->trs_index = new_index; } #endif /* NSS_RWLOCK_RANK_ORDER_DEBUG */
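# ---------------------------------------------------------------------------
# Illustrative Python model (added for exposition, not part of nssrwlk.c) of
# the writer-preference policy the C code above implements: readers wait while
# the lock is write-held or a writer is queued; on write-unlock, waiting
# writers are woken before readers. The recursive read/write locking and the
# rank-order debugging machinery of NSSRWLock are deliberately omitted.

import threading


class WriterPreferenceRWLock:
    def __init__(self):
        self._lock = threading.Lock()
        self._readers_ok = threading.Condition(self._lock)  # rw_reader_waitq
        self._writers_ok = threading.Condition(self._lock)  # rw_writer_waitq
        self._readers = 0          # rw_reader_locks
        self._writer = None        # rw_owner
        self._waiting_writers = 0  # rw_waiting_writers

    def lock_read(self):
        with self._lock:
            # wait if write-locked or if a writer is waiting (writer preference)
            while self._writer is not None or self._waiting_writers > 0:
                self._readers_ok.wait()
            self._readers += 1

    def unlock_read(self):
        with self._lock:
            self._readers -= 1
            if self._readers == 0 and self._waiting_writers > 0:
                self._writers_ok.notify()  # wake one waiting writer

    def lock_write(self):
        with self._lock:
            # wait while read-locked or write-locked
            while self._writer is not None or self._readers > 0:
                self._waiting_writers += 1
                self._writers_ok.wait()
                self._waiting_writers -= 1
            self._writer = threading.current_thread()

    def unlock_write(self):
        with self._lock:
            self._writer = None
            if self._waiting_writers > 0:
                self._writers_ok.notify()      # writers first, as in the C code
            else:
                self._readers_ok.notify_all()  # otherwise wake all readers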
import sys import numpy import os import gc import json import matplotlib import csv import argparse matplotlib.use("TkAgg") from keras.layers import Activation, Dense, LSTM, ReLU from keras.models import Sequential, load_model from keras.optimizers import Adam from keras.utils import plot_model from math import sqrt from matplotlib import pyplot from numpy.random import seed from pandas import DataFrame from sklearn.metrics import mean_squared_error, r2_score from sklearn.preprocessing import LabelEncoder from tensorflow import set_random_seed from keras.backend import tensorflow_backend as K from keras.wrappers.scikit_learn import KerasRegressor from sklearn.model_selection import GridSearchCV from helpers import metric_rmse, metric_r2_score, preprocess_data sys.path.insert(0, '../utils') import utils import builtins if hasattr(builtins, "hyperparameters"): defaultHyperparamSource = builtins.hyperparameters else: defaultHyperparamSource = 'hyperparameters.json' """ Parse Args """ parser = argparse.ArgumentParser() parser.add_argument("--hyperparameters", help="Parameters", default=defaultHyperparamSource) args = parser.parse_args() """ Fix seed """ seed(1) set_random_seed(1) """ Constants """ # Get parameters from JSON file PARAM_GRID = None with open(args.hyperparameters) as f: PARAM_GRID = json.load(f, encoding='utf-8') STARTING_TREE_INDEX = PARAM_GRID['starting_index'] # 1-indexed DATASET_PATH = PARAM_GRID['dataset_path'] RESULTS_PATH = PARAM_GRID['results_path'] MODEL_PATH = os.path.join(RESULTS_PATH, 'model.h5') FULL_DATASET = utils.read_data(DATASET_PATH, date_column=0, index_column=0) """ Helper functions """ def predict( lstm_model, tree_index, forecast_input_data_raw, forecast_scaled, prediction_years, list_variables, tree_names, scaler ): print('Starting prediction') forecast_scaled_x = forecast_scaled[:, 0:-len(list_variables)] inputs = utils.slice_data_to_batch_size_factor(forecast_scaled_x, PARAM_GRID['batch_size']) forecast_input_data = forecast_input_data_raw[:inputs.shape[0]] tree_sig_indices = list() for (index, variable) in enumerate(list_variables): if ( variable.lower() == 'a' or variable.lower() == 's' or variable.lower() == 'ele' or variable.lower() == 'sp' ): tree_sig_indices.append(index) predictions = list() for year in range(prediction_years[0], prediction_years[1]): inputs_reshaped = inputs.reshape(inputs.shape[0], 1, inputs.shape[1]) yhat = lstm_model.predict(inputs_reshaped, batch_size=PARAM_GRID['batch_size'], verbose=1) yhat_inverted = utils.invert_scale(scaler, inputs, yhat, len(list_variables)) """ Add forecasted value to predictions """ predictions.append(yhat_inverted[0]) """ Calculate next input """ inputs = numpy.vstack((inputs[1:], yhat[-1:])) """ Copy over values of 'A', 'S', 'Ele' and 'Sp' """ for index in tree_sig_indices: inputs[-1][index] = inputs[0][index] """ Clean up memory """ gc.collect() """ Set forecasted value """ columns = ['Year', 'Tree name'] + list_variables predictions = numpy.array(predictions) df = DataFrame( index=list(range(1, len(predictions)+1)), columns=columns ) df['Tree name'] = tree_names[tree_index] df['Year'] = list(range(prediction_years[0], prediction_years[1])) for index in range(len(list_variables)): variable = list_variables[index] df[variable] = predictions[:, index] """ Add true value and RMSE to data """ true_val = forecast_input_data[:, 0] rmse = numpy.sqrt( numpy.mean( numpy.square( numpy.subtract( predictions[:true_val.shape[0], 0], true_val ) ) ) ) true_val_column = true_val.tolist() + ['-'] * 
(len(predictions) - true_val.size) df.insert(3, 'BAI True Value', true_val_column) df.insert(4, 'RMSE', rmse) prediction_path = os.path.join(RESULTS_PATH, 'predictions.csv') if not os.path.isfile(prediction_path): df.to_csv(prediction_path, header=True, index=False) else: df.to_csv(prediction_path, mode='a', header=False, index=False) rmse_df = DataFrame(index=[1], columns=['RMSE']) rmse_df['Tree name'] = tree_names[tree_index] rmse_df['RMSE'] = rmse rmse_path = os.path.join(RESULTS_PATH, 'rmse.csv') if not os.path.isfile(rmse_path): rmse_df.to_csv(rmse_path, header=True, index=False) else: rmse_df.to_csv(rmse_path, mode='a', header=False, index=False) """ Clean up memory """ del df del predictions del true_val del rmse del true_val_column gc.collect() """ Main function starts here """ def main(): """ Start of script """ """ Get the model """ model = load_model( MODEL_PATH, custom_objects={'metric_rmse': metric_rmse, 'metric_r2_score': metric_r2_score} ) # try: # except: # print('Something went wrong while loading the model or model does not exist at ' + MODEL_PATH) # print('Exiting') # return """ Get all the different trees in the dataset """ tree_names = numpy.unique(FULL_DATASET.values[:, 0]) """ Create result directories """ if not os.path.isdir(RESULTS_PATH): os.makedirs(RESULTS_PATH) """ Pick input columns from full dataset """ print('Picking columns from dataset ' + DATASET_PATH) VARIABLES = PARAM_GRID['variables'] DATASET = FULL_DATASET[VARIABLES] VARIABLES = VARIABLES[1:] """ Get train, test and forecast values from dataset """ forecast_values = DATASET.values """ Preprocess all the data at once """ print('Preprocessing data') encoder = LabelEncoder() forecast_input_data = preprocess_data(forecast_values.copy(), encoder) """ Transform data to be supervised learning """ print('Transforming data to supervised multivariate model') forecast_supervised = DataFrame() for (i, tree) in enumerate(tree_names): forecast_tree_data = forecast_input_data[forecast_input_data[:, 0] == i, :] forecast_tree_supervised = utils.timeseries_to_supervised_multivariate(forecast_tree_data, 1, 1) """ Drop second tree name column and first row with 0 values """ forecast_tree_supervised.drop('var1(t)', axis=1, inplace=True) forecast_tree_supervised.drop(0, inplace=True) forecast_supervised = forecast_supervised.append(forecast_tree_supervised) forecast_supervised = forecast_supervised.values """ Create scaler and scale data """ scaler = utils.create_scaler(forecast_supervised[:, 1:]) """ Start prediction """ print('Starting prediction') for (i, tree) in enumerate(tree_names): if (i < STARTING_TREE_INDEX-1): print('Skipping tree %d' %(i+1)) continue print('Processing tree %d' % (i+1)) """ Get tree data """ forecast_tree_data = forecast_supervised[forecast_supervised[:, 0] == i, :] forecast_tree_input_data = forecast_input_data[forecast_input_data[:, 0] == i, :] # Remove tree name column forecast_tree_data = forecast_tree_data[:, 1:] forecast_tree_input_data = forecast_tree_input_data[:, 1:] # Scale data forecast_scaled = scaler.transform(forecast_tree_data) """ Prediction years """ prediction_years = (1981, 2051) # 2016 to 2050 inclusive """ Reset states to prepare for prediction """ model.model.reset_states() """ Predict """ predict( model, i, forecast_tree_input_data, forecast_scaled, prediction_years, VARIABLES, tree_names, scaler ) """ Clear variables """ del forecast_tree_data del forecast_tree_input_data del forecast_scaled gc.collect() if __name__ == "__main__": main() sys.exit(0)
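# ---------------------------------------------------------------------------
# Illustrative distillation (not part of the original script) of the
# closed-loop forecasting pattern used inside predict() above: each prediction
# is appended as the newest input row, while the static per-tree columns
# ('A', 'S', 'Ele', 'Sp') are copied forward unchanged. The argument names and
# shapes here are assumptions for illustration; inverse scaling is omitted.


def rolling_forecast(lstm_model, window, static_indices, steps, batch_size):
    predictions = []
    for _ in range(steps):
        yhat = lstm_model.predict(
            window.reshape(window.shape[0], 1, window.shape[1]),
            batch_size=batch_size)
        predictions.append(yhat[-1])
        window = numpy.vstack((window[1:], yhat[-1:]))  # slide the window
        for index in static_indices:
            window[-1][index] = window[0][index]        # pin static features
    return numpy.array(predictions)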
///
/// Copyright (C) 2015, Dependable Systems Laboratory, EPFL
///
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// The above copyright notice and this permission notice shall be included in all
/// copies or substantial portions of the Software.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/// SOFTWARE.
///

#ifndef LOGGING_H
#define LOGGING_H

#include <string>

#include <llvm/Support/raw_ostream.h>

#define DEFAULT_CONSOLE_OUTPUT "info"
#define DEFAULT_PLUGIN_LOG_LEVEL "info"

namespace s2e {

enum LogLevel { LOG_ALL, LOG_DEBUG, LOG_INFO, LOG_WARN, LOG_NONE };

/* Marked unused so the compiler does not warn in translation units that never call it */
static bool parseLogLevel(const std::string &levelString, LogLevel *out) __attribute__((unused));

static bool parseLogLevel(const std::string &levelString, LogLevel *out) {
    if (levelString == "debug") {
        *out = LOG_DEBUG;
        return true;
    } else if (levelString == "info") {
        *out = LOG_INFO;
        return true;
    } else if (levelString == "warn") {
        *out = LOG_WARN;
        return true;
    } else if (levelString == "none") {
        *out = LOG_NONE;
        return true;
    }
    return false;
}

} // namespace s2e

#endif
eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('(o(){G $,30,1x,21,E,2Q,2P,1t,1I,3e,T,1Y,2T,1Z,1v,3f,1U,1K,1c,4i,1S,1f,3i,2M,4z={}.a8,5z=[].a7||o(69){V(G i=0,l=8.Q;i<l;i++){u(i 4d 8&&8[i]===69)9 i}9-1},7d=o(2e,67){9 o(){9 2e.2U(67,14)}},7H=[].2M;2M=5p.K.2M;$=o(4w,4y){u(4y==M){4y=1A}u(1k 4w==="2m"||(1k 1O!=="2C"&&1O!==M)){9 4w}9 4y.a6(4w)};1f=(o(){o 1f(){}1f.5h=(o(){G 1N;1N=0;9{2O:o(){9 1N++}}})();1f.a5=o(2j){G 3u,4C,3w,1d,D,11;4C=2j;1d=2M.1g(14,1);V(D=0,11=1d.Q;D<11;D++){3u=1d[D];V(3w 4d 3u){u(!4z.1g(3u,3w))3x;4C[3w]=3u[3w]}}9 4C};1f.27=o(1y){u(1y<0){9 0}u(1y>B){9 B}9 1y};1f.3C=o(5Z,48,3E){G 3G,D,11,F,1J,Y;u(3E==M){3E={}}F=5Z.a0;Y=[];V(D=0,11=F.Q;D<11;D++){3G=F[D];u((3E.4R!=M)&&(1J=3G.4P,5z.1g(3E.4R,1J)>=0)){3x}Y.1z(48.6E(3G.4P,3G.9Z))}9 Y};9 1f})();u(1k 1O!=="2C"&&1O!==M){4i=1O;2P=4l(\'C\');2T=2P.2T;3e=4l(\'9Y\');3i=4l(\'3i\')}S{4i=2X}4i.E=E=(o(){E.4K={6i:"4.1.0",78:"2/12/9X"};E.5Q=1P;E.1W=1k 1O!=="2C"&&1O!==M;E.63=!E.1W;E.3R=2r;E.4H="6B";E.9W=o(){9"9V "+E.4K.6i+", 9U "+E.4K.78};E.7h="";E.7i="9T";E.4E=o(C){u(E.1W){9 2r}u(1k C==="3P"){C=$(C)}u(!((C!=M)&&(C.39!=M))){9 M}9 C.39(\'1e-2b-1N\')};o E(){G 18,1C,1N,13=8;u(14.Q===0){2D"4u 14";}u(8 9S E){8.37=8.37.4J(8);8.3O=8.3O.4J(8);18=14[0];u(!E.1W){1N=3L(E.4E(18[0]),10);1C=1k 18[1]==="o"?18[1]:1k 18[2]==="o"?18[2]:o(){};u(!9R(1N)&&1S.7u(1N)){9 1S.2h(1N,1C)}}8.1N=1f.5h.2O();8.4h=8.2G=M;8.3K={x:0,y:0};8.64=1P;8.4f=1P;8.3J=[];8.4O=[];8.4S=[];8.3F=M;8.6P=1P;8.9Q=1s 30(8);8.2J=1s 1c(8);8.79(o(){13.7a(18);9 13.7c()});9 8}S{9 1s E(14)}}E.K.79=o(2Z){G 4U,13=8;u(E.1W){9 47(o(){9 2Z.1g(13)},0)}S{u(1A.4V==="44"){1v.1E("7w 7x");9 47(o(){9 2Z.1g(13)},0)}S{4U=o(){u(1A.4V==="44"){1v.1E("7w 7x");9 2Z.1g(13)}};9 1A.7z("9P",4U,1P)}}};E.K.7a=o(18){G 3B,1y,F,Y;u(18.Q===0){2D"4u 14 7G";}8.2u=M;8.42=M;8.3Z=M;8.1C=o(){};8.61(18[0]);u(18.Q===1){9}3c(1k 18[1]){1n"3P":8.3Z=18[1];2R;1n"o":8.1C=18[1]}u(18.Q===2){9}8.1C=18[2];u(18.Q===4){F=18[4];Y=[];V(3B 4d F){u(!4z.1g(F,3B))3x;1y=F[3B];Y.1z(8.1a[3B]=1y)}9 Y}};E.K.61=o(2j){u(E.1W){8.2u=2j;8.42=\'68\';9}u(1k 2j==="2m"){8.2u=2j}S{8.2u=$(2j)}9 8.42=8.2u.4P.3d()};E.K.7c=o(){3c(8.42){1n"68":9 8.6b();1n"19":9 8.6s();1n"C":9 8.6t()}};E.K.6b=o(){G 13=8;1v.1E("9O V 1W");8.P=1s 2T();8.P.4X=o(){1v.1E("2T 6I. 6N = "+13.P.H+", 6R = "+13.P.N);13.C=1s 2P(13.P.H,13.P.N);9 13.37()};8.P.9E=o(76){2D 76;};9 8.P.1d=8.2u};E.K.6s=o(){8.P=8.2u;8.C=1A.2f(\'C\');8.1j=8.C.2g(\'2d\');1f.3C(8.P,8.C,{4R:[\'1d\']});8.P.4Y.4Z(8.C,8.P);8.52();9 8.59()};E.K.6t=o(){8.C=8.2u;8.1j=8.C.2g(\'2d\');u(8.3Z!=M){8.P=1A.2f(\'19\');8.P.1d=8.3Z;8.52();9 8.59()}S{9 8.37()}};E.K.52=o(){u(8.7l()){1v.1E(8.P.1d,"->",8.4x());8.7n=2r;8.P.1d=8.4x()}u(1Y.7o(8.P)){8.P.1d=1Y.2V(8.P.1d);9 1v.1E("9D P 9C, 9B 9A = "+8.P.1d)}};E.K.59=o(){u(8.P.44){9 8.3O()}S{9 8.P.4X=8.3O}};E.K.3O=o(){1v.1E("2T 6I. 
6N = "+8.P.H+", 6R = "+8.P.N);u(8.7n){8.C.H=8.P.H/8.4e();8.C.N=8.P.N/8.4e()}S{8.C.H=8.P.H;8.C.N=8.P.N}9 8.37()};E.K.37=o(){G i,1p,D,11,F;u(8.1j==M){8.1j=8.C.2g(\'2d\')}8.31=8.3o=8.H=8.C.H;8.3m=8.3l=8.N=8.C.N;8.7I();u(!8.5O()){8.5P()}u(8.P!=M){8.1j.3k(8.P,0,0,8.P.H,8.P.N,0,0,8.3o,8.3l)}8.1G=8.1j.2E(0,0,8.C.H,8.C.N);8.I=8.1G.1e;u(E.3R){8.4h=1s 41(8.I.Q);8.2G=1s 41(8.I.Q);F=8.I;V(i=D=0,11=F.Q;D<11;i=++D){1p=F[i];8.4h[i]=1p;8.2G[i]=1p}}8.1F={H:8.C.H,N:8.C.N};1S.65(8.1N,8);8.1C.1g(8,8);9 8.1C=o(){}};E.K.9z=o(){G 1p,D,11,F,Y;u(!E.3R){2D"5w 3W";}8.2G=1s 41(8.I.Q);F=8.I;Y=[];V(D=0,11=F.Q;D<11;D++){1p=F[D];Y.1z(8.2G.1z(1p))}9 Y};E.K.5O=o(){9 E.4E(8.C)!=M};E.K.5P=o(){u(E.1W||8.C.39(\'1e-2b-1N\')){9}9 8.C.6E(\'1e-2b-1N\',8.1N)};E.K.5x=o(){9 8.C.39(\'1e-2b-6e-3W\')!==M};E.K.7I=o(){G 2n;u(E.1W||8.5x()){9}2n=8.4e();u(2n!==1){1v.1E("9y 2n = "+2n);8.6P=2r;8.3o=8.C.H;8.3l=8.C.N;8.C.H=8.3o*2n;8.C.N=8.3l*2n;8.C.6l.H=""+8.3o+"6o";8.C.6l.N=""+8.3l+"6o";8.1j.9x(2n,2n);8.H=8.31=8.C.H;9 8.N=8.3m=8.C.N}};E.K.4e=o(){G 5C,2w;2w=2X.2w||1;5C=8.1j.9w||8.1j.9v||8.1j.9u||8.1j.9t||8.1j.9s||1;9 2w/5C};E.K.6H=o(){9 2X.2w!==1};E.K.7l=o(){u(8.5x()||!8.6H()){9 1P}9 8.4x()!==M};E.K.4x=o(){u(8.P==M){9 M}9 8.P.39(\'1e-2b-6e\')};E.K.43=o(6J){G 4b;4b=8.C;8.C=6J;8.1j=8.C.2g(\'2d\');4b.4Y.4Z(8.C,4b);8.H=8.C.H;8.N=8.C.N;8.1G=8.1j.2E(0,0,8.C.H,8.C.N);8.I=8.1G.1e;9 8.1F={H:8.C.H,N:8.C.N}};E.K.6O=o(1C){G 13=8;u(1C==M){1C=o(){}}1I.2H(8,"6Q");9 8.2J.2h(o(){13.1j.4a(13.1G,0,0);9 1C.1g(13)})};E.K.9q=o(){G i,1p,D,11,F;u(!E.3R){2D"5w 3W";}F=8.6W();V(i=D=0,11=F.Q;D<11;i=++D){1p=F[i];8.I[i]=1p}9 8.1j.4a(8.1G,0,0)};E.K.9p=o(){G C,1H,i,1G,1p,I,D,11,F;C=1A.2f(\'C\');1f.3C(8.C,C);C.H=8.31;C.N=8.3m;1H=C.2g(\'2d\');1G=1H.2E(0,0,C.H,C.N);I=1G.1e;F=8.4h;V(i=D=0,11=F.Q;D<11;i=++D){1p=F[i];I[i]=1p}1H.4a(1G,0,0);8.3K={x:0,y:0};8.4f=1P;9 8.43(C)};E.K.6W=o(){G C,46,1H,5I,5M,i,1G,1p,I,4p,3y,40,4v,H,D,1b,11,F,1J,2l,5N;u(!E.3R){2D"5w 3W";}4p=[];40=8.3K.x;5I=40+8.H;4v=8.3K.y;5M=4v+8.N;u(8.4f){C=1A.2f(\'C\');C.H=8.31;C.N=8.3m;1H=C.2g(\'2d\');1G=1H.2E(0,0,C.H,C.N);I=1G.1e;F=8.2G;V(i=D=0,11=F.Q;D<11;i=++D){1p=F[i];I[i]=1p}1H.4a(1G,0,0);3y=1A.2f(\'C\');3y.H=8.H;3y.N=8.N;1H=3y.2g(\'2d\');1H.3k(C,0,0,8.31,8.3m,0,0,8.H,8.N);I=1H.2E(0,0,8.H,8.N).1e;H=8.H}S{I=8.2G;H=8.31}V(i=1b=0,1J=I.Q;1b<1J;i=1b+=4){46=1U.7k(i,H);u(((40<=(2l=46.x)&&2l<5I))&&((4v<=(5N=46.y)&&5N<5M))){4p.1z(I[i],I[i+1],I[i+2],I[i+3])}}9 4p};E.K.1L=o(W,4g){8.2J.38({X:T.1V.5H,W:W,4g:4g});9 8};E.K.5G=o(W,J,1R,26){G i,D,F;u(!1R){1R=0;V(i=D=0,F=J.Q;0<=F?D<F:D>F;i=0<=F?++D:--D){1R+=J[i]}}8.2J.38({X:T.1V.5F,W:W,J:J,1R:1R,26:26||0});9 8};E.K.5E=o(2k,18){8.2J.38({X:T.1V.1K,2k:2k,18:18});9 8};E.K.5D=o(1C){G 1q;1q=1s 1Z(8);8.4S.1z(1q);8.2J.38({X:T.1V.5B});1C.1g(1q);8.2J.38({X:T.1V.5y});9 8};E.K.5R=o(1q){9 8.5S(1q)};E.K.5S=o(1q){8.4O.1z(8.3F);8.3J.1z(8.I);8.3F=1q;9 8.I=1q.I};E.K.5T=o(){8.I=8.3J.5V();9 8.3F=8.4O.5V()};E.K.5W=o(){9 8.3F.5Y()};9 E})();30=(o(){o 30(c){8.c=c}30.K.9o=o(){G i,22,3j,D,1b,1l,F;22={r:{},g:{},b:{}};V(i=D=0;D<=B;i=++D){22.r[i]=0;22.g[i]=0;22.b[i]=0}V(i=1b=0,F=8.c.I.Q;1b<F;i=1b+=4){22.r[8.c.I[i]]++;22.g[8.c.I[i+1]]++;22.b[8.c.I[i+2]]++}3j=8.c.I.Q/4;V(i=1l=0;1l<=B;i=++1l){22.r[i]/=3j;22.g[i]/=3j;22.b[i]/=3j}9 22};9 30})();E.5v=o(){G 19,3n,6d,D,11,Y;3n=1A.9h("19[1e-2b]");u(!(3n.Q>0)){9}Y=[];V(D=0,11=3n.Q;D<11;D++){19=3n[D];Y.1z(6d=1s 2Q(19,o(){8.6g();9 8.2h()}))}9 Y};u(E.63){(o(){u(1A.4V==="44"){9 E.5v()}S{9 1A.7z("9d",E.5v,1P)}})()}2Q=(o(){G 4c;4c="(\\\\w+)\\\\((.*?)\\\\)";o 2Q(2p,6m){8.6n=2p.39(\'1e-2b\');8.2b=E(2p,6m.4J(8))}2Q.K.6g=o(){G 18,3p,3q,5q,5o,m,r,3t,D,11,F,Y;8.2p=8.2b.C;r=1s 
6v(4c,\'g\');3t=8.6n.5n(r);u(!(3t.Q>0)){9}r=1s 6v(4c);Y=[];V(D=0,11=3t.Q;D<11;D++){5q=3t[D];F=5q.5n(r),m=F[0],3p=F[1],18=F[2];5o=1s 9b("9 o() { 8."+3p+"("+18+"); };");6C{3q=5o();Y.1z(3q.1g(8.2b))}6D(e){Y.1z(1v.1E(e))}}9 Y};2Q.K.2h=o(){G 2p;2p=8.2p;9 8.2b.6O(o(){9 2p.4Y.4Z(8.62(),2p)})};9 2Q})();E.1x=1x=(o(){o 1x(){}1x.54={};1x.Z=o(W,3q){9 8.54[W]=3q};1x.2h=o(W,O,L){9 8.54[W](O,L)};9 1x})();E.21=21=(o(){o 21(){}21.9a=o(2Y,3b,3z,3A){9 R.6X(R.1B(3z-2Y,2)+R.1B(3A-3b,2))};21.6Z=o(1w,U,3D){G 2o;u(3D==M){3D=1P}2o=1w+(R.96()*(U-1w));u(3D){9 2o.94(3D)}S{9 R.4T(2o)}};21.7b=o(A){9(0.93*A.r)+(0.91*A.g)+(0.90*A.b)};21.1u=o(1h,25,2c,15,4j,4k){G 4N,4M,4n,4o,3M,3N,1u,4L,2F,i,j,36,3T,t,3Q,2Y,3z,4I,3S,3b,3A,4G,D,1b,1l,F,1J;3Q=1h[0];3S=1h[1];2Y=25[0];3b=25[1];3z=2c[0];3A=2c[1];4I=15[0];4G=15[1];1u={};3M=3L(3*(2Y-3Q),10);4n=3*(3z-2Y)-3M;4N=4I-3Q-3M-4n;3N=3*(3b-3S);4o=3*(3A-3b)-3N;4M=4G-3S-3N-4o;V(i=D=0;D<5U;i=++D){t=i/5U;4L=R.4T((4N*R.1B(t,3))+(4n*R.1B(t,2))+(3M*t)+3Q);2F=R.4T((4M*R.1B(t,3))+(4o*R.1B(t,2))+(3N*t)+3S);u(4j&&2F<4j){2F=4j}S u(4k&&2F>4k){2F=4k}1u[4L]=2F}u(1u.Q<15[0]+1){V(i=1b=0,F=15[0];0<=F?1b<=F:1b>=F;i=0<=F?++1b:--1b){u(!(1u[i]!=M)){36=[i-1,1u[i-1]];V(j=1l=i,1J=15[0];i<=1J?1l<=1J:1l>=1J;j=i<=1J?++1l:--1l){u(1u[j]!=M){3T=[j,1u[j]];2R}}1u[i]=36[1]+((3T[1]-36[1])/(3T[0]-36[0]))*(i-36[0])}}}u(!(1u[15[0]]!=M)){1u[15[0]]=1u[15[0]-1]}9 1u};9 21})();1t=(o(){o 1t(){}1t.4F=o(2K){G b,g,r;u(2K.8Z(0)==="#"){2K=2K.4D(1)}r=3L(2K.4D(0,2),16);g=3L(2K.4D(2,2),16);b=3L(2K.4D(4,2),16);9{r:r,g:g,b:b}};1t.7J=o(r,g,b){G d,h,l,U,1w,s;u(1k r==="2m"){g=r.g;b=r.b;r=r.r}r/=B;g/=B;b/=B;U=R.U(r,g,b);1w=R.1w(r,g,b);l=(U+1w)/2;u(U===1w){h=s=0}S{d=U-1w;s=l>0.5?d/(2-U-1w):d/(U+1w);h=(o(){3c(U){1n r:9(g-b)/d+(g<b?6:0);1n g:9(b-r)/d+2;1n b:9(r-g)/d+4}})();h/=6}9{h:h,s:s,l:l}};1t.8V=o(h,s,l){G b,g,p,q,r;u(1k h==="2m"){s=h.s;l=h.l;h=h.h}u(s===0){r=g=b=l}S{q=l<0.5?l*(1+s):l+s-l*s;p=2*l-q;r=8.4r(p,q,h+1/3);g=8.4r(p,q,h);b=8.4r(p,q,h-1/3)}9{r:r*B,g:g*B,b:b*B}};1t.4r=o(p,q,t){u(t<0){t+=1}u(t>1){t-=1}u(t<1/6){9 p+(q-p)*6*t}u(t<1/2){9 q}u(t<2/3){9 p+(q-p)*(2/3-t)*6}9 p};1t.6a=o(r,g,b){G d,h,U,1w,s,v;r/=B;g/=B;b/=B;U=R.U(r,g,b);1w=R.1w(r,g,b);v=U;d=U-1w;s=U===0?0:d/U;u(U===1w){h=0}S{h=(o(){3c(U){1n r:9(g-b)/d+(g<b?6:0);1n g:9(b-r)/d+2;1n b:9(r-g)/d+4}})();h/=6}9{h:h,s:s,v:v}};1t.6c=o(h,s,v){G b,f,g,i,p,q,r,t;i=R.3H(h*6);f=h*6-i;p=v*(1-s);q=v*(1-f*s);t=v*(1-(1-f)*s);3c(i%6){1n 0:r=v;g=t;b=p;2R;1n 1:r=q;g=v;b=p;2R;1n 2:r=p;g=v;b=t;2R;1n 3:r=p;g=q;b=v;2R;1n 4:r=t;g=p;b=v;2R;1n 5:r=v;g=p;b=q}9{r:r*B,g:g*B,b:b*B}};1t.6f=o(r,g,b){G x,y,z;r/=B;g/=B;b/=B;u(r>0.4Q){r=R.1B((r+0.20)/1.20,2.4)}S{r/=12.92}u(g>0.4Q){g=R.1B((g+0.20)/1.20,2.4)}S{g/=12.92}u(b>0.4Q){b=R.1B((b+0.20)/1.20,2.4)}S{b/=12.92}x=r*0.8S+g*0.8R+b*0.8Q;y=r*0.8P+g*0.8O+b*0.8M;z=r*0.8L+g*0.8J+b*0.8I;9{x:x*1i,y:y*1i,z:z*1i}};1t.8H=o(x,y,z){G b,g,r;x/=1i;y/=1i;z/=1i;r=(3.8G*x)+(-1.8F*y)+(-0.8E*z);g=(-0.8D*x)+(1.8B*y)+(0.8A*z);b=(0.8z*x)+(-0.8y*y)+(1.8x*z);u(r>0.57){r=(1.20*R.1B(r,0.58))-0.20}S{r*=12.92}u(g>0.57){g=(1.20*R.1B(g,0.58))-0.20}S{g*=12.92}u(b>0.57){b=(1.20*R.1B(b,0.58))-0.20}S{b*=12.92}9{r:r*B,g:g*B,b:b*B}};1t.6F=o(x,y,z){G a,b,l,5a,5b,5c;u(1k x==="2m"){y=x.y;z=x.z;x=x.x}5a=95.6K;5b=1i.0;5c=6L.6M;x/=5a;y/=5b;z/=5c;u(x>0.5d){x=R.1B(x,0.5e)}S{x=(7.5g*x)+0.32}u(y>0.5d){y=R.1B(y,0.5e)}S{y=(7.5g*y)+0.32}u(z>0.5d){z=R.1B(z,0.5e)}S{z=(7.5g*z)+0.32}l=6S*y-16;a=6T*(x-y);b=6U*(y-z);9{l:l,a:a,b:b}};1t.8w=o(l,a,b){G x,y,z;u(1k 
l==="2m"){a=l.a;b=l.b;l=l.l}y=(l+16)/6S;x=y+(a/6T);z=y-(b/6U);u(x>0.5j){x=x*x*x}S{x=0.5k*(x-0.32)}u(y>0.5j){y=y*y*y}S{y=0.5k*(y-0.32)}u(z>0.5j){z=z*z*z}S{z=0.5k*(z-0.32)}9{x:x*95.6K,y:y*1i.0,z:z*6L.6M}};1t.8v=o(r,g,b){G 5m;u(1k r==="2m"){g=r.g;b=r.b;r=r.r}5m=8.6f(r,g,b);9 8.6F(5m)};1t.8u=o(l,a,b){};9 1t})();1I=(o(){o 1I(){}1I.2I={};1I.71=["72","73","6Q","74","75","3s"];1I.2H=o(29,X,1e){G 3r,D,11,F,Y;u(8.2I[X]&&8.2I[X].Q){F=8.2I[X];Y=[];V(D=0,11=F.Q;D<11;D++){3r=F[D];u(3r.29===M||29.1N===3r.29.1N){Y.1z(3r.2e.1g(29,1e))}S{Y.1z(8r 0)}}9 Y}};1I.8q=o(29,X,2e){G 5s,5t;u(1k 29==="3P"){5t=29;5s=X;29=M;X=5t;2e=5s}u(5z.1g(8.71,X)<0){9 1P}u(!8.2I[X]){8.2I[X]=[]}8.2I[X].1z({29:29,2e:2e});9 2r};9 1I})();E.1I=1I;E.T=T=(o(){o T(){}T.1V={5H:1,5F:2,5B:3,5y:4,5u:5,1K:6};T.Z=o(W,7e){9 E.K[W]=7e};9 T})();E.1Y=1Y=(o(){o 1Y(){}1Y.7f=/(?:(?:8n|8l):\\/\\/)((?:\\w+)\\.(?:(?:\\w|\\.)+))/;1Y.7o=o(19){G 3Y;u(19==M){9 1P}u(8.7j(19)){9 1P}3Y=19.1d.5n(8.7f);u(3Y){9 3Y[1]!==1A.8k}S{9 1P}};1Y.7j=o(19){G F;9(19.4H!=M)&&((F=19.4H.3d())===\'6B\'||F===\'8j-8i\')};1Y.2V=o(1d){9""+E.7h+"?"+E.7i+"="+(8g(1d))};1Y.8f=o(2L){G 4s;4s={8e:\'8d\',8a:\'89\',88:\'87\',85:\'84\'};2L=2L.3d();u(4s[2L]!=M){2L=4s[2L]}9"82/7Y."+2L};9 1Y})();E.K.7X=o(){u(1k 1O!=="2C"&&1O!==M){9 8.7B.2U(8,14)}S{9 8.7C.2U(8,14)}};E.K.7C=o(X){G P;u(X==M){X="7D"}X=X.3d();P=8.5J(X).7W("P/"+X,"P/7M-7L");9 1A.8W.7K=P};E.K.7B=o(35,4q){G 5L;u(4q==M){4q=2r}6C{5L=3i.7N(35);u(5L.7O()&&!4q){9 1P}}6D(e){1v.1E("7P 7Q 35 "+35)}9 3i.7R(35,8.C.7S(),o(){9 1v.1E("7T 7U 48 "+35)})};E.K.62=o(X){G 19;19=1A.2f(\'19\');19.1d=8.5J(X);19.H=8.1F.H;19.N=8.1F.N;u(2X.2w){19.H/=2X.2w;19.N/=2X.2w}9 19};E.K.5J=o(X){u(X==M){X="7D"}X=X.3d();9 8.C.7V("P/"+X)};1Z=(o(){o 1Z(c){8.c=c;8.3p=8.c;8.1a={5K:\'7A\',2z:1.0};8.7Z=1f.5h.2O();8.C=1k 1O!=="2C"&&1O!==M?1s 2P():1A.2f(\'C\');8.C.H=8.c.1F.H;8.C.N=8.c.1F.N;8.1j=8.C.2g(\'2d\');8.1j.80(8.C.H,8.C.N);8.1G=8.1j.2E(0,0,8.C.H,8.C.N);8.I=8.1G.1e}1Z.K.5D=o(2Z){9 8.c.5D.1g(8.c,2Z)};1Z.K.81=o(7y){8.1a.5K=7y;9 8};1Z.K.2z=o(2z){8.1a.2z=2z/1i;9 8};1Z.K.83=o(){G i,1Q,D,F;1Q=8.c.I;V(i=D=0,F=8.c.I.Q;D<F;i=D+=4){8.I[i]=1Q[i];8.I[i+1]=1Q[i+1];8.I[i+2]=1Q[i+2];8.I[i+3]=1Q[i+3]}9 8};1Z.K.45=o(){9 8.c.45.2U(8.c,14)};1Z.K.86=o(P){u(1k P==="2m"){P=P.1d}S u(1k P==="3P"&&P[0]==="#"){P=$(P).1d}u(!P){9 8}8.c.3g.1z({X:T.1V.5u,1d:P,1q:8});9 8};1Z.K.5Y=o(){G i,2N,1Q,17,O,L,D,F,Y;1Q=8.c.3J[8.c.3J.Q-1];2N=8.c.I;Y=[];V(i=D=0,F=2N.Q;D<F;i=D+=4){L={r:1Q[i],g:1Q[i+1],b:1Q[i+2],a:1Q[i+3]};O={r:2N[i],g:2N[i+1],b:2N[i+2],a:2N[i+3]};17=1x.2h(8.1a.5K,O,L);17.r=1f.27(17.r);17.g=1f.27(17.g);17.b=1f.27(17.b);u(!(17.a!=M)){17.a=O.a}1Q[i]=L.r-((L.r-17.r)*(8.1a.2z*(17.a/B)));1Q[i+1]=L.g-((L.g-17.g)*(8.1a.2z*(17.a/B)));Y.1z(1Q[i+2]=L.b-((L.b-17.b)*(8.1a.2z*(17.a/B))))}9 Y};9 1Z})();3f=(o(){o 3f(){G W,D,11,F;F=[\'7s\',\'8b\',\'8c\',\'7r\'];V(D=0,11=F.Q;D<11;D++){W=F[D];8[W]=(o(W){9 o(){u(!E.5Q){9}9 7q[W].2U(7q,14)}})(W)}8.1E=8.7s}9 3f})();1v=1s 3f();1U=(o(){1U.5A=o(x,y,H){9(y*H+x)*4};1U.7k=o(1m,H){G x,y;y=R.3H(1m/(H*4));x=(1m%(H*4))/4;9{x:x,y:y}};o 1U(c){8.c=c;8.1m=0}1U.K.8h=o(){G x,y;y=8.c.1F.N-R.3H(8.1m/(8.c.1F.H*4));x=(8.1m%(8.c.1F.H*4))/4;9{x:x,y:y}};1U.K.7m=o(3U,3V){G 1T;1T=8.1m+(8.c.1F.H*4*(3V*-1))+(4*3U);u(1T>8.c.I.Q||1T<0){9{r:0,g:0,b:0,a:0}}9{r:8.c.I[1T],g:8.c.I[1T+1],b:8.c.I[1T+2],a:8.c.I[1T+3]}};1U.K.8m=o(3U,3V,A){G 7g;7g=8.1m+(8.c.1F.H*4*(3V*-1))+(4*3U);u(1T>8.c.I.Q||1T<0){9}8.c.I[1T]=A.r;8.c.I[1T+1]=A.g;8.c.I[1T+2]=A.b;8.c.I[1T+3]=A.a;9 2r};1U.K.8o=o(x,y){G 1m;1m=8.5A(x,y,8.H);9{r:8.c.I[1m],g:8.c.I[1m+1],b:8.c.I[1m+2],a:8.c.I[1m+3]}};1U.K.8p=o(x,y,A){G 
1m;1m=8.5A(x,y,8.H);8.c.I[1m]=A.r;8.c.I[1m+1]=A.g;8.c.I[1m+2]=A.b;9 8.c.I[1m+3]=A.a};9 1U})();1K=(o(){o 1K(){}1K.5r={};1K.Z=o(W,2k){9 8.5r[W]=2k};1K.2h=o(1j,W,18){9 8.5r[W].2U(1j,18)};9 1K})();E.1K=1K;E.1c=1c=(o(){1c.2y=E.1W?4l(\'8s\').8t().Q:4;o 1c(c){8.c=c;8.2q=7d(8.2q,8);8.3g=[];8.2B=M}1c.K.38=o(5i){u(5i==M){9}9 8.3g.1z(5i)};1c.K.2q=o(){G 1q;u(8.3g.Q===0){1I.2H(8,"74");u(8.56!=M){8.56.1g(8.c)}9 8}8.1r=8.3g.6A();3c(8.1r.X){1n T.1V.5B:1q=8.c.4S.6A();8.c.5R(1q);9 8.2q();1n T.1V.5y:8.c.5W();8.c.5T();9 8.2q();1n T.1V.5u:9 8.6z(8.1r.1q,8.1r.1d);1n T.1V.1K:9 8.6y();8C:9 8.6x()}};1c.K.2h=o(1C){8.56=1C;8.2B=1s 41(8.c.I.Q);9 8.2q()};1c.K.53=o(2e){G 3v,51,15,f,i,50,n,1h,D,F,Y,13=8;8.3X=0;n=8.c.I.Q;51=R.3H((n/4)/1c.2y);3v=51*4;50=3v+((n/4)%1c.2y)*4;Y=[];V(i=D=0,F=1c.2y;0<=F?D<F:D>F;i=0<=F?++D:--D){1h=i*3v;15=1h+(i===1c.2y-1?50:3v);u(E.1W){f=3e(o(){9 2e.1g(13,i,1h,15)});Y.1z(f.8K())}S{Y.1z(47((o(i,1h,15){9 o(){9 2e.1g(13,i,1h,15)}})(i,1h,15),0))}}9 Y};1c.K.6x=o(){1I.2H(8.c,"72",8.1r);u(8.1r.X===T.1V.5H){9 8.53(8.6q)}S{9 8.53(8.6p)}};1c.K.6y=o(){1v.1E("8N 2k "+8.1r.2k);1K.2h(8.c,8.1r.2k,8.1r.18);1v.1E("1K "+8.1r.2k+" 4W!");9 8.2q()};1c.K.6q=o(2a,1h,15){G 1e,i,2t,1X,D;1v.1E("6k #"+2a+" - T: "+8.1r.W+", 8T: "+1h+", 8U: "+15);1I.2H(8.c,"75",{66:2a,60:1c.2y,8X:1h,8Y:15});1e={r:0,g:0,b:0,a:0};2t=1s 1U(8.c);V(i=D=1h;D<15;i=D+=4){2t.1m=i;1e.r=8.c.I[i];1e.g=8.c.I[i+1];1e.b=8.c.I[i+2];1e.a=8.c.I[i+3];1X=8.1r.4g.1g(2t,1e);u(!(1X.a!=M)){1X.a=1e.a}8.c.I[i]=1f.27(1X.r);8.c.I[i+1]=1f.27(1X.g);8.c.I[i+2]=1f.27(1X.b);8.c.I[i+3]=1f.27(1X.a)}8.3s(2a);u(E.1W){9 3e["5X"]()}};1c.K.6p=o(2a,1h,15){G J,3I,26,1M,3a,1R,i,j,k,23,n,W,1p,2t,1X,D,1b,1l;W=8.1r.W;26=8.1r.26;1R=8.1r.1R;n=8.c.I.Q;J=8.1r.J;3I=R.6X(J.Q);23=[];1v.1E("97 23 - T: "+8.1r.W);1h=R.U(1h,8.c.1F.H*4*((3I-1)/2));15=R.1w(15,n-(8.c.1F.H*4*((3I-1)/2)));1M=(3I-1)/2;2t=1s 1U(8.c);V(i=D=1h;D<15;i=D+=4){2t.1m=i;3a=0;V(j=1b=-1M;-1M<=1M?1b<=1M:1b>=1M;j=-1M<=1M?++1b:--1b){V(k=1l=1M;1M<=-1M?1l<=-1M:1l>=-1M;k=1M<=-1M?++1l:--1l){1p=2t.7m(j,k);23[3a*3]=1p.r;23[3a*3+1]=1p.g;23[3a*3+2]=1p.b;3a++}}1X=8.5G(J,23,1R,26);8.2B[i]=1f.27(1X.r);8.2B[i+1]=1f.27(1X.g);8.2B[i+2]=1f.27(1X.b);8.2B[i+3]=8.c.I[i+3]}8.3s(2a);u(E.1W){9 3e["5X"]()}};1c.K.3s=o(2a){G i,D,F;u(2a>=0){1v.1E("6k #"+2a+" 4W! 
T: "+8.1r.W)}8.3X++;1I.2H(8.c,"3s",{66:2a,98:8.3X,60:1c.2y});u(8.3X===1c.2y){u(8.1r.X===T.1V.5F){V(i=D=0,F=8.c.I.Q;0<=F?D<F:D>F;i=0<=F?++D:--D){8.c.I[i]=8.2B[i]}}u(2a>=0){1v.1E("T "+8.1r.W+" 4W!")}1I.2H(8.c,"73",8.1r);9 8.2q()}};1c.K.5G=o(J,23,1R,26){G i,1y,D,F;1y={r:0,g:0,b:0};V(i=D=0,F=J.Q;0<=F?D<F:D>F;i=0<=F?++D:--D){1y.r+=J[i]*23[i*3];1y.g+=J[i]*23[i*3+1];1y.b+=J[i]*23[i*3+2]}1y.r=(1y.r/1R)+26;1y.g=(1y.g/1R)+26;1y.b=(1y.b/1R)+26;9 1y};1c.K.6z=o(1q,1d){G 19,2V,13=8;19=1A.2f(\'19\');19.4X=o(){1q.1j.3k(19,0,0,13.c.1F.H,13.c.1F.N);1q.1G=1q.1j.2E(0,0,13.c.1F.H,13.c.1F.N);1q.I=1q.1G.1e;13.c.I=1q.I;9 13.2q()};2V=1Y.99(1d);9 19.1d=2V!=M?2V:1d};9 1c})();E.1S=1S=(o(){o 1S(){}1S.33={};1S.7u=o(2v){9 8.33[2v]!=M};1S.2O=o(2v){9 8.33[2v]};1S.65=o(W,2j){9 8.33[W]=2j};1S.2h=o(2v,1C){G 13=8;47(o(){9 1C.1g(13.2O(2v),13.2O(2v))},0);9 8.2O(2v)};1S.9c=o(W){u(W==M){W=1P}u(W){9 6h 8.33[W]}S{9 8.33={}}};9 1S})();1x.Z("7A",o(O,L){9{r:O.r,g:O.g,b:O.b}});1x.Z("9e",o(O,L){9{r:(O.r*L.r)/B,g:(O.g*L.g)/B,b:(O.b*L.b)/B}});1x.Z("9f",o(O,L){9{r:B-(((B-O.r)*(B-L.r))/B),g:B-(((B-O.g)*(B-L.g))/B),b:B-(((B-O.b)*(B-L.b))/B)}});1x.Z("9g",o(O,L){G 17;17={};17.r=L.r>1o?B-2*(B-O.r)*(B-L.r)/B:(L.r*O.r*2)/B;17.g=L.g>1o?B-2*(B-O.g)*(B-L.g)/B:(L.g*O.g*2)/B;17.b=L.b>1o?B-2*(B-O.b)*(B-L.b)/B:(L.b*O.b*2)/B;9 17});1x.Z("9i",o(O,L){9{r:O.r-L.r,g:O.g-L.g,b:O.b-L.b}});1x.Z("9j",o(O,L){9{r:L.r+O.r,g:L.g+O.g,b:L.b+O.b}});1x.Z("9k",o(O,L){9{r:1o-2*(L.r-1o)*(O.r-1o)/B,g:1o-2*(L.g-1o)*(O.g-1o)/B,b:1o-2*(L.b-1o)*(O.b-1o)/B}});1x.Z("9l",o(O,L){G 17;17={};17.r=L.r>1o?B-((B-L.r)*(B-(O.r-1o)))/B:(L.r*(O.r+1o))/B;17.g=L.g>1o?B-((B-L.g)*(B-(O.g-1o)))/B:(L.g*(O.g+1o))/B;17.b=L.b>1o?B-((B-L.b)*(B-(O.b-1o)))/B:(L.b*(O.b+1o))/B;9 17});1x.Z("9m",o(O,L){9{r:L.r>O.r?L.r:O.r,g:L.g>O.g?L.g:O.g,b:L.b>O.b?L.b:O.b}});1x.Z("9n",o(O,L){9{r:L.r>O.r?O.r:L.r,g:L.g>O.g?O.g:L.g,b:L.b>O.b?O.b:L.b}});T.Z("45",o(){G 2S;u(14.Q===1){2S=1t.4F(14[0])}S{2S={r:14[0],g:14[1],b:14[2]}}9 8.1L("45",o(A){A.r=2S.r;A.g=2S.g;A.b=2S.b;A.a=B;9 A})});T.Z("6Y",o(J){J=R.3H(B*(J/1i));9 8.1L("6Y",o(A){A.r+=J;A.g+=J;A.b+=J;9 A})});T.Z("6V",o(J){J*=-0.9r;9 8.1L("6V",o(A){G U;U=R.U(A.r,A.g,A.b);u(A.r!==U){A.r+=(U-A.r)*J}u(A.g!==U){A.g+=(U-A.g)*J}u(A.b!==U){A.b+=(U-A.b)*J}9 A})});T.Z("6G",o(J){J*=-1;9 8.1L("6G",o(A){G 3h,2x,U;U=R.U(A.r,A.g,A.b);2x=(A.r+A.g+A.b)/3;3h=((R.2s(U-2x)*2/B)*J)/1i;u(A.r!==U){A.r+=(U-A.r)*3h}u(A.g!==U){A.g+=(U-A.g)*3h}u(A.b!==U){A.b+=(U-A.b)*3h}9 A})});T.Z("6u",o(J){9 8.1L("6u",o(A){G 2x;2x=21.7b(A);A.r=2x;A.g=2x;A.b=2x;9 A})});T.Z("6r",o(J){J=R.1B((J+1i)/1i,2);9 8.1L("6r",o(A){A.r/=B;A.r-=0.5;A.r*=J;A.r+=0.5;A.r*=B;A.g/=B;A.g-=0.5;A.g*=J;A.g+=0.5;A.g*=B;A.b/=B;A.b-=0.5;A.b*=J;A.b+=0.5;A.b*=B;9 A})});T.Z("6j",o(J){9 8.1L("6j",o(A){G h,2A,28;2A=1t.6a(A.r,A.g,A.b);h=2A.h*1i;h+=R.2s(J);h=h%1i;h/=1i;2A.h=h;28=1t.6c(2A.h,2A.s,2A.v);28.a=A.a;9 28})});T.Z("7v",o(){G 2W,28;u(14.Q===2){28=1t.4F(14[0]);2W=14[1]}S u(14.Q===4){28={r:14[0],g:14[1],b:14[2]};2W=14[3]}9 8.1L("7v",o(A){A.r-=(A.r-28.r)*(2W/1i);A.g-=(A.g-28.g)*(2W/1i);A.b-=(A.b-28.b)*(2W/1i);9 A})});T.Z("7t",o(){9 8.1L("7t",o(A){A.r=B-A.r;A.g=B-A.g;A.b=B-A.b;9 A})});T.Z("70",o(J){u(J==M){J=1i}J/=1i;9 8.1L("70",o(A){A.r=R.1w(B,(A.r*(1-(0.9F*J)))+(A.g*(0.9G*J))+(A.b*(0.9H*J)));A.g=R.1w(B,(A.r*(0.9I*J))+(A.g*(1-(0.9J*J)))+(A.b*(0.9K*J)));A.b=R.1w(B,(A.r*(0.9L*J))+(A.g*(0.9M*J))+(A.b*(1-(0.9N*J))));9 A})});T.Z("6w",o(J){9 8.1L("6w",o(A){A.r=R.1B(A.r/B,J)*B;A.g=R.1B(A.g/B,J)*B;A.b=R.1B(A.b/B,J)*B;9 A})});T.Z("7E",o(J){J=R.2s(J)*2.55;9 8.1L("7E",o(A){G 2o;2o=21.6Z(J*-1,J);A.r+=2o;A.g+=2o;A.b+=2o;9 
A})});T.Z("7p",o(J){J=R.2s(J)*2.55;9 8.1L("7p",o(A){u(A.r>B-J){A.r=B}S u(A.r<J){A.r=0}u(A.g>B-J){A.g=B}S u(A.g<J){A.g=0}u(A.b>B-J){A.b=B}S u(A.b<J){A.b=0}9 A})});T.Z("77",o(1a){G 34,5f;u(1k 1a!=="2m"){9 8}V(34 4d 1a){u(!4z.1g(1a,34))3x;5f=1a[34];u(5f===0){6h 1a[34];3x}1a[34]/=1i}u(1a.Q===0){9 8}9 8.1L("77",o(A){u(1a.4B!=M){u(1a.4B>0){A.r+=(B-A.r)*1a.4B}S{A.r-=A.r*R.2s(1a.4B)}}u(1a.4A!=M){u(1a.4A>0){A.g+=(B-A.g)*1a.4A}S{A.g-=A.g*R.2s(1a.4A)}}u(1a.4t!=M){u(1a.4t>0){A.b+=(B-A.b)*1a.4t}S{A.b-=A.b*R.2s(1a.4t)}}9 A})});T.Z("4m",o(){G 1u,2i,24,25,2c,15,i,1h,D,1b,F,1J;2i=14[0],24=2<=14.Q?7H.1g(14,1):[];u(1k 2i==="3P"){2i=2i.a1("")}u(2i[0]==="v"){2i=[\'r\',\'g\',\'b\']}u(24.Q<3||24.Q>4){2D"4u a2 a3 14 48 4m 3p";}1h=24[0];25=24[1];2c=24.Q===4?24[2]:24[1];15=24[24.Q-1];1u=21.1u(1h,25,2c,15,0,B);u(1h[0]>0){V(i=D=0,F=1h[0];0<=F?D<F:D>F;i=0<=F?++D:--D){1u[i]=1h[1]}}u(15[0]<B){V(i=1b=1J=15[0];1J<=B?1b<=B:1b>=B;i=1J<=B?++1b:--1b){1u[i]=15[1]}}9 8.1L("4m",o(A){G 1l,2l;V(i=1l=0,2l=2i.Q;0<=2l?1l<2l:1l>2l;i=0<=2l?++1l:--1l){A[2i[i]]=1u[A[2i[i]]]}9 A})});T.Z("a4",o(J){G 25,2c,p;p=R.2s(J)/1i;25=[0,B*p];2c=[B-(B*p),B];u(J<0){25=25.7F();2c=2c.7F()}9 8.4m(\'28\',[0,0],25,2c,[B,B])});E.1K.Z("5l",o(H,N,x,y){G C,1H;u(x==M){x=0}u(y==M){y=0}u(1k 1O!=="2C"&&1O!==M){C=1s 2P(H,N)}S{C=1A.2f(\'C\');1f.3C(8.C,C);C.H=H;C.N=N}1H=C.2g(\'2d\');1H.3k(8.C,x,y,H,N,0,0,H,N);8.3K={x:x,y:y};8.64=2r;9 8.43(C)});E.1K.Z("49",o(1D){G C,1H;u(1D==M){1D=M}u(1D===M||(!(1D.H!=M)&&!(1D.N!=M))){1v.7r("4u a9 aa 1F 7G V 49");9}u(!(1D.H!=M)){1D.H=8.C.H*1D.N/8.C.N}S u(!(1D.N!=M)){1D.N=8.C.N*1D.H/8.C.H}u(1k 1O!=="2C"&&1O!==M){C=1s 2P(1D.H,1D.N)}S{C=1A.2f(\'C\');1f.3C(8.C,C);C.H=1D.H;C.N=1D.N}1H=C.2g(\'2d\');1H.3k(8.C,0,0,8.C.H,8.C.N,0,0,1D.H,1D.N);8.4f=2r;9 8.43(C)});E.T.Z("5l",o(){9 8.5E("5l",5p.K.2M.1g(14,0))});E.T.Z("49",o(){9 
8.5E("49",5p.K.2M.1g(14,0))})}).1g(8);',62,631,'||||||||this|return|||||||||||||||function||||||if||||||rgba|255|canvas|_i|Caman|_ref|var|width|pixelData|adjust|prototype|rgbaParent|null|height|rgbaLayer|image|length|Math|else|Filter|max|for|name|type|_results|register||_len||_this|arguments|end||result|args|img|options|_j|Renderer|src|data|Util|call|start|100|context|typeof|_k|loc|case|128|pixel|layer|currentJob|new|Convert|bezier|Log|min|Blender|val|push|document|pow|callback|newDims|debug|dimensions|imageData|ctx|Event|_ref1|Plugin|process|builder|id|exports|false|parentData|divisor|Store|newLoc|PixelInfo|Type|NodeJS|res|IO|Layer|055|Calculate|levels|kernel|cps|ctrl1|bias|clampRGB|rgb|target|bnum|caman|ctrl2||fn|createElement|getContext|execute|chans|obj|plugin|_ref2|object|ratio|rand|ele|processNext|true|abs|pixelInfo|initObj|search|devicePixelRatio|avg|Blocks|opacity|hsv|modPixelData|undefined|throw|getImageData|curveY|originalPixelData|trigger|events|renderer|hex|lang|slice|layerData|get|Canvas|CamanParser|break|color|Image|apply|proxyUrl|level|window|x1|cb|Analyze|originalWidth|1379310345|items|chan|file|leftCoord|finishInit|add|getAttribute|builderIndex|y1|switch|toLowerCase|Fiber|Logger|renderQueue|amt|fs|numPixels|drawImage|preScaledHeight|originalHeight|imgs|preScaledWidth|filter|func|event|blockFinished|unparsedInstructions|copy|blockN|prop|continue|scaledCanvas|x2|y2|key|copyAttributes|getFloat|opts|currentLayer|attr|floor|adjustSize|pixelStack|cropCoordinates|parseInt|Cx|Cy|imageLoaded|string|x0|allowRevert|y0|rightCoord|horiz|vert|disabled|blocksDone|matches|imageUrl|startX|Uint8Array|initType|replaceCanvas|complete|fillColor|coord|setTimeout|to|resize|putImageData|oldCanvas|INST_REGEX|in|hiDPIRatio|resized|processFn|initializedPixelData|Root|lowBound|highBound|require|curves|Bx|By|pixels|overwrite|hueToRGB|langToExt|blue|Invalid|startY|sel|hiDPIReplacement|root|__hasProp|green|red|dest|substr|getAttrId|hexToRGB|y3|crossOrigin|x3|bind|version|curveX|Ay|Ax|layerStack|nodeName|04045|except|canvasQueue|round|listener|readyState|finished|onload|parentNode|replaceChild|lastBlockN|blockPixelLength|imageAdjustments|eachBlock|blenders||finishedFn|0031308|4166666667|waitForImageLoaded|whiteX|whiteY|whiteZ|008856451679|3333333333|value|787037037|uniqid|job|2068965517|1284185493|crop|xyz|match|instFunc|Array|inst|plugins|_fn|_type|LoadOverlay|DOMUpdated|Revert|hiDPIDisabled|LayerFinished|__indexOf|coordinatesToLocation|LayerDequeue|backingStoreRatio|newLayer|processPlugin|Kernel|processKernel|Single|endX|toBase64|blendingMode|stats|endY|_ref3|hasId|assignId|DEBUG|executeLayer|pushContext|popContext|1000|pop|applyCurrentLayer|yield|applyToParent|from|totalBlocks|setInitObject|toImage|autoload|cropped|put|blockNum|me|node|item|rgbToHSV|initNode|hsvToRGB|parser|hidpi|rgbToXYZ|parse|delete|release|hue|Block|style|ready|dataStr|px|renderKernel|renderBlock|contrast|initImage|initCanvas|greyscale|RegExp|gamma|executeFilter|executePlugin|loadOverlay|shift|anonymous|try|catch|setAttribute|xyzToLab|vibrance|hiDPICapable|loaded|newCanvas|047|108|883|Width|render|scaled|renderStart|Height|116|500|200|saturation|originalVisiblePixels|sqrt|brightness|randomRange|sepia|types|processStart|processComplete|renderFinished|blockStarted|err|channels|date|domIsLoaded|parseArguments|luminance|setup|__bind|filterFunc|domainRegex|nowLoc|remoteProxy|proxyParam|corsEnabled|locationToCoordinates|needsHiDPISwap|getPixelRelative|swapped|isRemote|clip|console|error|log|invert|has|colorize|DOM|initialized|mode|addEven
tListener|normal|nodeSave|browserSave|png|noise|reverse|given|__slice|hiDPIAdjustments|rgbToHSL|href|stream|octet|statSync|isFile|Creating|output|writeFile|toBuffer|Finished|writing|toDataURL|replace|save|caman_proxy|layerID|createImageData|setBlendingMode|proxies|copyParent|js|javascript|overlayImage|pl|perl|py|python|info|warn|rb|ruby|useProxy|encodeURIComponent|locationXY|credentials|use|domain|https|putPixelRelative|http|getPixel|putPixel|listen|void|os|cpus|labToRGB|rgbToLab|labToXYZ|0570|2040|0557|0415|8758|default|9689|4986|5372|2406|xyzToRGB|9505|1192|run|0193|0722|Executing|7152|2126|1805|3576|4124|Start|End|hslToRGB|location|startPixel|endPixel|charAt|114|587||299|toFixed||random|Rendering|blocksFinished|remoteCheck|distance|Function|flush|DOMContentLoaded|multiply|screen|overlay|querySelectorAll|difference|addition|exclusion|softLight|lighten|darken|calculateLevels|reset|revert|01|backingStorePixelRatio|oBackingStorePixelRatio|msBackingStorePixelRatio|mozBackingStorePixelRatio|webkitBackingStorePixelRatio|scale|HiDPI|resetOriginalPixelData|URL|using|detected|Remote|onerror|607|769|189|349|314|168|272|534|869|Initializing|readystatechange|analyze|isNaN|instanceof|camanProxyUrl|Released|Version|toString|2013|fibers|nodeValue|attributes|split|number|of|exposure|extend|querySelector|indexOf|hasOwnProperty|or|missing'.split('|'),0,{}))
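The packed blob above is a minified build of the CamanJS image-manipulation library; the filter names (brightness, contrast, sepia, hue, invert, render, and so on) are visible in its string table. A hedged usage sketch against CamanJS's documented public API; the selector and filter values are illustrative:

// Apply a chain of the pixel-wise filters registered above, then render.
Caman('#image', function () {
  this.brightness(10)
      .contrast(20)
      .sepia(60)
      .render(function () {
        console.log('Filters applied');
      });
});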
/* eslint quotes: 0 */
// Defines the MongoDB $jsonSchema for service `nedb5`. (Can be re-generated.)
const merge = require('lodash.merge');
// !code: imports // !end
// !code: init // !end

let moduleExports = merge({},
  // !<DEFAULT> code: model
  {
    bsonType: "object",
    additionalProperties: false,
    properties: {
      _id: {
        bsonType: "objectId"
      }
    }
  },
  // !end
  // !code: moduleExports // !end
);

// !code: exports // !end
module.exports = moduleExports;

// !code: funcs // !end
// !code: end // !end
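The `// !code: ... // !end` markers above are generator insertion points that are meant to survive re-generation. A hedged sketch of how the default model block might be filled in; the `name` field and `required` list are purely illustrative, not part of the generated service:

let moduleExports = merge({},
  // !code: model
  {
    bsonType: "object",
    additionalProperties: false,
    required: ["name"],
    properties: {
      _id: { bsonType: "objectId" },
      // Hypothetical custom field, shown for illustration only
      name: { bsonType: "string" }
    }
  },
  // !end
);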
from pathlib import Path
from tempfile import TemporaryDirectory

from tuulfile.directory import cd
from tuulver.version import bump_build, \
    bump_major, \
    bump_minor, \
    bump_patch, \
    bump_pre, \
    create_version_file, \
    emit_product_name, \
    emit_version


def test_create_version_file():
    with TemporaryDirectory() as p:
        with cd(p):
            fname = 'version.yaml'
            create_version_file(fname, 'justtestin')
            f = Path(fname)
            assert f.exists()


def test_emit_product_name():
    with TemporaryDirectory() as p:
        with cd(p):
            fname = 'version.yaml'
            prod = 'productname'
            create_version_file(fname, prod)
            assert emit_product_name(fname) == prod


def test_emit_version():
    with TemporaryDirectory() as p:
        with cd(p):
            fname = 'version.yaml'
            create_version_file(fname, 'whatever')
            assert emit_version(fname) == '0.0.0'


def test_bump_major():
    with TemporaryDirectory() as p:
        with cd(p):
            fname = 'version.yaml'
            create_version_file(fname, 'yup')
            bump_major(fname)
            assert emit_version(fname) == '1.0.0'


def test_bump_minor():
    with TemporaryDirectory() as p:
        with cd(p):
            fname = 'version.yaml'
            create_version_file(fname, 'yup')
            bump_minor(fname)
            assert emit_version(fname) == '0.1.0'


def test_bump_patch():
    with TemporaryDirectory() as p:
        with cd(p):
            fname = 'version.yaml'
            create_version_file(fname, 'yup')
            bump_patch(fname)
            assert emit_version(fname) == '0.0.1'


def test_bump_pre():
    with TemporaryDirectory() as p:
        with cd(p):
            fname = 'version.yaml'
            create_version_file(fname, 'yup')
            bump_pre(fname)
            assert emit_version(fname) == '0.0.0-pre.1'


def test_bump_build():
    with TemporaryDirectory() as p:
        with cd(p):
            fname = 'version.yaml'
            create_version_file(fname, 'yup')
            bump_pre(fname)
            bump_build(fname)
            bump_build(fname)
            assert emit_version(fname) == '0.0.0-pre.1+build.2'
            bump_pre(fname)
            bump_build(fname)
            assert emit_version(fname) == '0.0.0-pre.2+build.1'
/*=========================================================================
 *
 * Copyright Insight Software Consortium
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0.txt
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *=========================================================================*/
/*
 *
 * Copyright (c) 1986-2006
 * Biomedical Imaging Resource
 * Mayo Clinic
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1) Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * Neither the name of the Mayo Clinic nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/**
 * \file itkAnalyzeDbh.h
 * This file contains notes about the Analyze 7.5 file format gathered from
 * several sources. A special note of thanks to Dennis P. Hanson for his
 * generous contributions in getting this information correct.
 * - NOTE: The comments were added as part of the Insight Segmentation
 *   And Registration Toolkit.
 *
 * Function: This file contains the structure definition for Analyze files
 */
#ifndef __itkAnalyzeDbh_h
#define __itkAnalyzeDbh_h

namespace itk
{
/**
 * \enum DataTypeKeyValues
 * Acceptable values for hdr.dime.datatype
 */
enum DataTypeKeyValues
{
  ANALYZE_DT_UNKNOWN       =0,  /**< Denotes that the data type is unknown */
  ANALYZE_DT_BINARY        =1,  /**< Denotes that the data type is binary */
  ANALYZE_DT_UNSIGNED_CHAR =2,  /**< Denotes that the data type is unsigned char */
  ANALYZE_DT_SIGNED_SHORT  =4,  /**< Denotes that the data type is signed short */
  ANALYZE_DT_SIGNED_INT    =8,  /**< Denotes that the data type is signed int */
  ANALYZE_DT_FLOAT         =16, /**< Denotes that the data type is single precision floating point */
  ANALYZE_DT_COMPLEX       =32, /**< Denotes that the data type is pairs of single precision floating point numbers */
  ANALYZE_DT_DOUBLE        =64, /**< Denotes that the data type is double precision floating point */
  ANALYZE_DT_RGB           =128,/**< Denotes that the data type is triples of unsigned char */
  ANALYZE_DT_ALL           =255,/**< Denotes that the data type is unknown */
  //Obsolete, using SPM, B2ANALYZE_DT_UNSIGNED_SHORT =6,  /**< Denotes that the data type is unsigned short in brains2 analyze extensions*/
  //Obsolete, using SPM, B2ANALYZE_DT_UNSIGNED_INT   =12, /**< Denotes that the data type is unsigned int in brains2 analyze extensions*/
  SPMANALYZE_DT_UNSIGNED_SHORT=132,/**< Denotes that the data type is unsigned short in SPM analyze extensions*/
  SPMANALYZE_DT_UNSIGNED_INT  =136 /**< Denotes that the data type is unsigned int in SPM analyze extensions*/
};
/**
 * \enum DataTypeIndex
 * The index into the DataTypes array for each type.
 */
enum DataTypeIndex
{
  ANALYZE_DT_INDEX_UNKNOWN       =0,
  ANALYZE_DT_INDEX_BINARY        =1,
  ANALYZE_DT_INDEX_UNSIGNED_CHAR =2,
  ANALYZE_DT_INDEX_SIGNED_SHORT  =3,
  ANALYZE_DT_INDEX_SIGNED_INT    =4,
  ANALYZE_DT_INDEX_FLOAT         =5,
  ANALYZE_DT_INDEX_COMPLEX       =6,
  ANALYZE_DT_INDEX_DOUBLE        =7,
  ANALYZE_DT_INDEX_RGB           =8,
  ANALYZE_DT_INDEX_ALL           =9,
  //Obsolete, using SPM, B2ANALYZE_DT_INDEX_UNSIGNED_SHORT=10,
  //Obsolete, using SPM, B2ANALYZE_DT_INDEX_UNSIGNED_INT  =11,
  SPMANALYZE_DT_INDEX_UNSIGNED_SHORT =10,
  SPMANALYZE_DT_INDEX_UNSIGNED_INT   =11
};

/**
 * \var DataTypes
 * An array of the Analyze v7.5 known DataTypes
 * - 0-->"UNKNOWN"
 * - 1-->"BINARY"
 * - 2-->"CHAR"
 * - 3-->"SHORT"
 * - 4-->"INT"
 * - 5-->"FLOAT"
 * - 6-->"COMPLEX"
 * - 7-->"DOUBLE"
 * - 8-->"RGB"
 * - 9-->"ALL"
 * - 10-->"USHORT"
 * - 11-->"UINT"
 * @see DataTypes
 * @see DataTypeSizes
 * @see DataTypeKey
 */
extern const char DataTypes[12][10];
/**
 * \var DataTypeSizes
 * An array with the corresponding number of bits for each image type.
 * - 0-->0
 * - 1-->1
 * - 2-->8
 * - 3-->16
 * - 4-->32
 * - 5-->32
 * - 6-->64
 * - 7-->64
 * - 8-->24
 * - 9-->0
 * - 10-->16
 * - 11-->32
 * @see DataTypes
 * @see DataTypeSizes
 * @see DataTypeKey
 */
extern const short int DataTypeSizes[12];
/**
 * \var DataTypeKey
 * An array with Data type key sizes
 * - 0-->ANALYZE_DT_UNKNOWN
 * - 1-->ANALYZE_DT_BINARY
 * - 2-->ANALYZE_DT_UNSIGNED_CHAR
 * - 3-->ANALYZE_DT_SIGNED_SHORT
 * - 4-->ANALYZE_DT_SIGNED_INT
 * - 5-->ANALYZE_DT_FLOAT
 * - 6-->ANALYZE_DT_COMPLEX
 * - 7-->ANALYZE_DT_DOUBLE
 * - 8-->ANALYZE_DT_RGB
 * - 9-->ANALYZE_DT_ALL
 * - 10-->SPMANALYZE_DT_UNSIGNED_SHORT
 * - 11-->SPMANALYZE_DT_UNSIGNED_INT
 * @see DataTypes
 * @see DataTypeSizes
 * @see DataTypeKey
 */
extern const short int DataTypeKey[12];

/**
 * \struct header_key
 * - (c) Copyright, 1986-1995
 * - Biomedical Imaging Resource
 * - Mayo Foundation
 */
struct header_key                  /* header_key */
{                                  /*off + size*/
  /*0 + 4  */int sizeof_hdr;       /**< Must indicate the byte size of the header file,
                                        almost always 348 bytes, but may be larger for sites
                                        that have extended the default implementation. This
                                        field is used by Analyze to determine if the file is
                                        big endian or little endian. If the size of the *.hdr
                                        file does not equal this value, then the structure
                                        needs to be byte swapped. */
  /*4 + 10 */char data_type[10];   /**< A convenience character string that has a 1 to 1
                                        correspondence with the short int datatype field of
                                        the image_dimension. @see DataTypes */
  /*14 + 18*/char db_name[18];     /**< A convenience character string that should be the
                                        same as the file name without the *.hdr or *.img
                                        extension. */
  /*32 + 4 */int extents;          /**< Should be 16384; the image file is created as
                                        contiguous with a minimum extent size. This field may
                                        be used to check the endianness of the file. */
  /*36 + 2 */short int session_error; /**< This field is not used for anything other than
                                        internal use in pre-1995 versions of Analyze. Setting
                                        this to 0 causes no problems. */
  /*38 + 1 */char regular;         /**< This must be 'r' to indicate that all images and
                                        volumes are the same size. */
  /*39 + 1 */char hkey_un0;        /**< Unused field for future expansion. */
};/* total=40 */

/**
 * \struct image_dimension struct describes the organization and
 * size of images. These elements enable IO routines to reference
 * images by volume and slice number.
 * - (c) Copyright, 1986-1995
 * - Biomedical Imaging Resource
 * - Mayo Foundation
 */
struct image_dimension           /* image_dimension */
{                                /* off + size*/
  /*0 + 16 */ short int dim[8];  /**< Array of image dimensions
                                      - dim[0] number of dimensions; usually 4
                                      - dim[1] image X dimension, i.e. number of voxels per row (adjacent memory locations)
                                      - dim[2] image Y dimension, i.e. number of rows per slice
                                      - dim[3] Volume Z dimension, i.e. number of slices per volume
                                      - dim[4] Number of time points, i.e. number of volumes per series
                                      - . . .
                                      - dim[7] volumes in file, i.e. number of volumes per series */
  /*16 + 4 */ char vox_units[4]; /**< Specifies the spatial units of measure for a voxel;
                                      valid values are "mm", "cm" and "in". NOTE: if no match
                                      is found, "mm" is assumed. */
  /*20 + 8 */ char cal_units[8]; /**< Specifies the name of the calibration unit; valid
                                      values are "mm", "cm" and "in". NOTE: if no match is
                                      found, "mm" is assumed. */
  /*28 + 2 */ short int unused1; /**< Unused field for future expansion. */
  /*30 + 2 */ short int datatype;/**< A short int that defines the type of data being stored.
                                      NOTE: THIS MUST BE FILLED IN. This is the field that
                                      most applications use to determine the data storage
                                      type. Valid values are defined by DataTypeKeyValues.
                                      @see DataTypeKeyValues */
  /*32 + 2 */ short int bitpix;  /**< Bits per pixel. This field must agree with the datatype
                                      and data_type fields. @see DataTypeSizes */
  /*34 + 2 */ short int dim_un0; /**< Unused value for future expansion. */
  /*36 + 32*/ float pixdim[8];   /**< Parallel array to dim giving voxel dimensions.
                                      NOTE: pixdim[0] is ignored; the number of dims is taken
                                      from dim[0], and pixdim[1..7] are the actual pixdims.
                                      - pixdim[0] this field is not used
                                      - pixdim[1] voxel width
                                      - pixdim[2] voxel height
                                      - pixdim[3] voxel depth or slice thickness
                                      - pixdim[4]
                                      - . . .
                                      - pixdim[7] */
  /*68 + 4 */ float vox_offset;  /**< Byte offset in the .img file at which voxels start. If
                                      the value is negative, the absolute value applies for
                                      every image in the file. */
  /*72 + 4 */ float roi_scale;   /**< The "ROI Scale Factor" was really added in by the
                                      developers of SPM. This is used as a multiplicative
                                      factor to scale all values measured in the ROI module.
                                      This really shouldn't be set by any other application
                                      outside of Analyze and SPM. */
  /*76 + 4 */ float funused1;    /**< Unused, for future expansion */
  /*80 + 4 */ float funused2;    /**< Unused, for future expansion */
  /*84 + 4 */ float cal_max;     /**< The parameters 'cal_max' and 'cal_min' provided a
                                      mechanism for rescaling the voxel values into a
                                      different range of value representation. This string
                                      allowed whatever the range of value representation was
                                      to be expressed with these string characters. Never
                                      really used in a robust manner. */
  /*88 + 4 */ float cal_min;     /**< @see image_dimension::cal_max */
  /*92 + 4 */ int compressed;    /**< Valid values are only 0 and 1. A 0 value indicates that
                                      the file is uncompressed. A 1 value indicates that the
                                      *.img file has been compressed with the standard Unix
                                      'compress' utility.
                                      - filename, compressed value, description
                                      - basename.img, 0, uncompressed image file of size
                                      - basename.img, 1, compressed image file using unix 'compress' utility
                                      - basename.img.Z, 0, compressed image using unix 'compress' utility
                                      - basename.img.gz, 0, compressed image file gzip utility
                                      - basename.img.Z, 1, invalid setting
                                      - basename.img.gz, 1, invalid setting */
  /*96 + 4 */ int verified;      /**< This was only used internally by pre-1995 versions of
                                      Analyze. */
  /*100 + 4*/ int glmax;         /**< This is the global max/min for the entire volume image
                                      (or sets of volume images if there are multiple volumes
                                      in this file). The cal_max/min are used only if the
                                      desired representation range of the data is different
                                      from the true range of the data. For example, CT data
                                      may be stored in signed shorts with a range from
                                      0 - 2000, but an appropriate representation range for
                                      these values is in Hounsfield units from -1000 to 1000.
                                      Any reported values in Analyze were scaled via this
                                      value scaling function. UNSIGNED INT and COMPLEX were
                                      really not supported under ANALYZE 7.5, so these should
                                      be avoided in the Analyze 7.5 file format. The max/min
                                      for FLOATs was the nearest integer to the floating
                                      point values. */
  /*104 + 4*/ int glmin;         /**< @see image_dimension::glmax */
};/* total=108 */

/**
 * \struct data_history
 * Most of these are historical, relevant to use with the Dynamic Spatial
 * Reconstructor scanner developed at the Mayo BIR in the late 70's and through
 * the 80's. However, the 'orient' field is very important and
 * is used to indicate individual slice orientation and determines whether
 * the Analyze 'Movie' program will attempt to flip the images before
 * displaying a movie sequence. It is perfectly acceptable to put any
 * values you want into those fields because they do not (should not)
 * affect image processing.
 * - (c) Copyright, 1986-1995
 * - Biomedical Imaging Resource
 * - Mayo Foundation
 */
struct data_history                 /* data_history */
{                                   /*off + size*/
  /*0 + 80  */char descrip[80];     /**< A place to put a short description of the data */
  /*80 + 24 */char aux_file[24];    /**< A place to put the name of an auxiliary file to use
                                         instead of the default .img file. This is not
                                         currently used by the Analyze program. */
  /*104 + 1 */char orient;          /**< The 'orient' field in the data_history structure
                                         specifies the primary orientation of the data as it
                                         is stored in the file on disk. This usually
                                         corresponds to the orientation in the plane of
                                         acquisition, given that this would correspond to the
                                         order in which the data is written to disk by the
                                         scanner or other software application. It would be
                                         very rare that you would ever encounter any old
                                         ANALYZE 7.5 files that contain values of 'orient'
                                         which indicate that the data has been 'flipped'. The
                                         'flipped flag' values were really only used internal
                                         to the Analyze program to precondition data for fast
                                         display in the Movie module, where the images were
                                         actually flipped vertically in order to accommodate
                                         the raster paint order on older graphics devices.
                                         The only cases you will encounter will have values
                                         of 0, 1, or 2.
                                      - hdr->orient  "MayoClinic/Analyze"  Origin  dims[1]  dims[2]  dims[3]
                                      - ======================================================================
                                      - 0  transverse-unflipped  IRP  R->L  P->A  I->S
                                      - 1  coronal-unflipped     IRP  R->L  I->S  P->A
                                      - 2  sagittal-unflipped    IRP  P->A  I->S  R->L
                                      - 3  transverse-flipped    IRA  R->L  A->P  I->S
                                      - 4  coronal-flipped       SRP  R->L  S->I  P->A
                                      - 5  sagittal-flipped      ILP  P->A  I->S  L->R
                                      - Where the Origin designators are with respect to the patient
                                      - [(I)nferior|(S)uperior] [(L)eft|(R)ight] [(A)nterior|(P)osterior]

                                         SPECIAL NOTE: THE BEHAVIOR OF THIS IO ROUTINE
                                         DIFFERS FROM THAT OF ANALYZE! NO RE_ORIENTATION OF
                                         THE DATA WILL OCCUR IN THE ITK IMPLEMENTATION. Upon
                                         loading into the Analyze program, all data is
                                         reoriented into the 3D Analyze Coordinate System.
                                         The orientation of patient anatomy in the 3D Analyze
                                         Coordinate System has a fixed orientation relative
                                         to each of the orthogonal axes. This coordinate
                                         system does fix the origin in the subject's
                                         Inferior-Right-Posterior location relative to
                                         anatomy, with a left-handed coordinate system for
                                         specification of the other anatomy relative to the
                                         axes as given here:
                                      - X-Y plane is Transverse
                                      - X-Z plane is Coronal
                                      - Y-Z plane is Sagittal
                                         where:
                                      - X axis runs from patient right (low X) to patient left (high X)
                                      - Y axis runs from posterior (low Y) to anterior (high Y)
                                      - Z axis runs from inferior (low Z) to superior (high Z) */
  /*105 + 10*/char originator[10];  /**< The person or group that generated this image */
  /*115 + 10*/char generated[10];   /**< The date the image was generated. */
  /*125 + 10*/char scannum[10];     /**< An institution-independent identifier for the image */
  /*135 + 10*/char patient_id[10];  /**< An institution-independent identifier for the
                                         subject of the scan */
  /*145 + 10*/char exp_date[10];    /**< Experiment date - these were used for DSR experiments */
  /*155 + 10*/char exp_time[10];    /**< Experiment time - these were used for DSR experiments */
  /*165 + 3 */char hist_un0[3];     /**< Unused padding of the structure */
  /*168 + 4 */int views;            /**< Number of views in reconstruction - these were used
                                         for DSR experiments */
  /*172 + 4 */int vols_added;       /**< Number of time points summed together - these were
                                         used for DSR experiments */
  /*176 + 4 */int start_field;      /**< Video field for first view used in reconstruction -
                                         these were used for DSR experiments */
  /*180 + 4 */int field_skip;       /**< Video field skip factor used in the reconstructed
                                         image - these were used for DSR experiments */
  /*184 + 4 */int omax;             /**< The omax/omin and smax/smin relate to rescaling of
                                         the value range via the Load process, where both the
                                         'original' omax/omin and the 'scaled' omax/omin
                                         could be set to rescale the value range of the data
                                         when it is loaded. */
  /*188 + 4 */int omin;             /**< @see data_history::omax */
  /*192 + 4 */int smax;             /**< @see data_history::omax */
  /*196 + 4 */int smin;             /**< @see data_history::omax */
};/* total=200 */

/**
 * \struct dsr
 * Analyze 7.5 header structure
 * The header file is a 'C' structure which describes the dimensions
 * and properties of the voxel data.
 * NOTE: Values in convenience fields should have no
 * effect on how this file is interpreted.
 * These fields are not read by the Analyze program,
 * but are written as a convenience for interpreting the
 * file without an Analyze-compliant file reader.
 * - (c) Copyright, 1986-1995
 * - Biomedical Imaging Resource
 * - Mayo Foundation
 */
struct dsr                                     /* dsr */
{                                              /* off + size*/
  /*0 + 40   */ struct header_key hk;          /**< The header_key structure. @see header_key */
  /*40 + 108 */ struct image_dimension dime;   /**< The image_dimension structure. @see image_dimension */
  /*148 + 200*/ struct data_history hist;      /**< The data_history structure. @see data_history */
};/* total=348 */
} //End namespace itk
#endif /* __itkAnalyzeDbh_h */
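Per the header_key notes above, sizeof_hdr doubles as a byte-order probe: it should read as 348, and if it only does so with the bytes swapped, the whole header needs swapping. A minimal sketch of that check, written in JavaScript for illustration (not part of ITK); the function name and error text are mine:

const fs = require('fs');

// Returns true if the .hdr file is little endian, false if big endian.
// Reads header_key.sizeof_hdr (int32 at offset 0), documented above as
// almost always 348 for a standard Analyze 7.5 header.
function analyzeHdrIsLittleEndian(hdrPath) {
  const buf = fs.readFileSync(hdrPath);
  const view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength);
  if (view.getInt32(0, true) === 348) {
    return true;   // reads correctly as little endian
  }
  if (view.getInt32(0, false) === 348) {
    return false;  // reads correctly as big endian; swap before use
  }
  throw new Error('sizeof_hdr is not 348 in either byte order; not a standard Analyze 7.5 header');
}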
# Copyright (c) Ye Liu. All rights reserved.

from .base import Hook
from .checkpoint import CheckpointHook
from .closure import ClosureHook
from .eval import EvalHook
from .events import (CommandLineWriter, EventWriterHook, JSONWriter,
                     TensorboardWriter)
from .lr_updater import LrUpdaterHook
from .memory import EmptyCacheHook
from .optimizer import OptimizerHook
from .precise_bn import PreciseBNHook
from .sampler_seed import SamplerSeedHook
from .timer import TimerHook

__all__ = [
    'Hook', 'CheckpointHook', 'ClosureHook', 'EvalHook', 'CommandLineWriter',
    'EventWriterHook', 'JSONWriter', 'TensorboardWriter', 'LrUpdaterHook',
    'EmptyCacheHook', 'OptimizerHook', 'PreciseBNHook', 'SamplerSeedHook',
    'TimerHook'
]
/*
 * jQuery Superfish Menu Plugin - v1.7.4
 * Copyright (c) 2013 Joel Birch
 *
 * Dual licensed under the MIT and GPL licenses:
 *	http://www.opensource.org/licenses/mit-license.php
 *	http://www.gnu.org/licenses/gpl.html
 */

(function ($, window) {
	"use strict";

	var methods = (function () {
		// private properties and methods go here
		var c = {
				bcClass: 'sf-breadcrumb',
				menuClass: 'sf-js-enabled',
				anchorClass: 'sf-with-ul',
				menuArrowClass: 'sf-arrows'
			},
			ios = (function () {
				var ios = /iPhone|iPad|iPod/i.test(navigator.userAgent);
				if (ios) {
					// iOS clicks only bubble as far as body children
					$(window).load(function () {
						$('body').children().on('click', $.noop);
					});
				}
				return ios;
			})(),
			wp7 = (function () {
				var style = document.documentElement.style;
				return ('behavior' in style && 'fill' in style && /iemobile/i.test(navigator.userAgent));
			})(),
			toggleMenuClasses = function ($menu, o) {
				var classes = c.menuClass;
				if (o.cssArrows) {
					classes += ' ' + c.menuArrowClass;
				}
				$menu.toggleClass(classes);
			},
			setPathToCurrent = function ($menu, o) {
				return $menu.find('li.' + o.pathClass).slice(0, o.pathLevels)
					.addClass(o.hoverClass + ' ' + c.bcClass)
					.filter(function () {
						return ($(this).children(o.popUpSelector).hide().show().length);
					}).removeClass(o.pathClass);
			},
			toggleAnchorClass = function ($li) {
				$li.children('a').toggleClass(c.anchorClass);
			},
			toggleTouchAction = function ($menu) {
				var touchAction = $menu.css('ms-touch-action');
				touchAction = (touchAction === 'pan-y') ? 'auto' : 'pan-y';
				$menu.css('ms-touch-action', touchAction);
			},
			applyHandlers = function ($menu, o) {
				var targets = 'li:has(' + o.popUpSelector + ')';
				if ($.fn.hoverIntent && !o.disableHI) {
					$menu.hoverIntent(over, out, targets);
				}
				else {
					$menu
						.on('mouseenter.superfish', targets, over)
						.on('mouseleave.superfish', targets, out);
				}
				var touchevent = 'MSPointerDown.superfish';
				if (!ios) {
					touchevent += ' touchend.superfish';
				}
				if (wp7) {
					touchevent += ' mousedown.superfish';
				}
				$menu
					.on('focusin.superfish', 'li', over)
					.on('focusout.superfish', 'li', out)
					.on(touchevent, 'a', o, touchHandler);
			},
			touchHandler = function (e) {
				var $this = $(this),
					$ul = $this.siblings(e.data.popUpSelector);
				if ($ul.length > 0 && $ul.is(':hidden')) {
					$this.one('click.superfish', false);
					if (e.type === 'MSPointerDown') {
						$this.trigger('focus');
					} else {
						$.proxy(over, $this.parent('li'))();
					}
				}
			},
			over = function () {
				var $this = $(this),
					o = getOptions($this);
				clearTimeout(o.sfTimer);
				$this.siblings().superfish('hide').end().superfish('show');
			},
			out = function () {
				var $this = $(this),
					o = getOptions($this);
				if (ios) {
					$.proxy(close, $this, o)();
				} else {
					clearTimeout(o.sfTimer);
					o.sfTimer = setTimeout($.proxy(close, $this, o), o.delay);
				}
			},
			close = function (o) {
				o.retainPath = ($.inArray(this[0], o.$path) > -1);
				this.superfish('hide');
				if (!this.parents('.' + o.hoverClass).length) {
					o.onIdle.call(getMenu(this));
					if (o.$path.length) {
						$.proxy(over, o.$path)();
					}
				}
			},
			getMenu = function ($el) {
				return $el.closest('.' + c.menuClass);
			},
			getOptions = function ($el) {
				return getMenu($el).data('sf-options');
			},
			resizeBg = function () {
				// theme addition: stretch the fake background behind sub-menus on wide screens
				if ($(window).width() > 767 && $.fn.superfish.defaults.fakeBg) {
					var page = $($.fn.superfish.defaults.fakeBgLimiter);
					var obj = $('.sub-menu');
					var offset = $(page).offset();
					var x = offset.left;
					var x1 = $(obj).offset().left;
					var width = $(page).width();
					$('.fake-bg').css({
						left: (x - x1),
						width: width
					});
				}
			};

		return {
			// public methods
			hide: function (instant) {
				if (this.length) {
					var $this = this,
						o = getOptions($this);
					if (!o) {
						return this;
					}
					var not = (o.retainPath === true) ? o.$path : '',
						$ul = $this.find('li.' + o.hoverClass).add(this).not(not).removeClass(o.hoverClass).children(o.popUpSelector),
						speed = o.speedOut;

					if (instant) {
						$ul.show();
						speed = 0;
					}
					o.retainPath = false;
					o.onBeforeHide.call($ul);
					$ul.children('li').css({opacity: 0});
					$ul.stop(true, true).animate(o.animationOut, {
						duration: speed,
						complete: function () {
							var $this = $(this);
							o.onHide.call($this);
						}
					});
				}
				return this;
			},
			show: function () {
				var o = getOptions(this);
				if (!o) {
					return this;
				}
				var $this = this.addClass(o.hoverClass),
					$ul = $this.children(o.popUpSelector);

				o.onBeforeShow.call($ul);
				$ul.stop(true, true).animate(o.animation, {
					duration: o.speed,
					step: function () {
						if ($(this).css('display') != 'none') {
							resizeBg();
						}
					},
					complete: function () {
						o.onShow.call($ul);
						$ul.children('li').animate({opacity: 1}, 'normal');
					}
				});
				return this;
			},
			destroy: function () {
				return this.each(function () {
					var $this = $(this),
						o = $this.data('sf-options'),
						$hasPopUp;
					if (!o) {
						return false;
					}
					$hasPopUp = $this.find(o.popUpSelector).parent('li');
					clearTimeout(o.sfTimer);
					toggleMenuClasses($this, o);
					toggleAnchorClass($hasPopUp);
					toggleTouchAction($this);
					// remove event handlers
					$this.off('.superfish').off('.hoverIntent');
					// clear animation's inline display style
					$hasPopUp.children(o.popUpSelector).attr('style', function (i, style) {
						return style.replace(/display[^;]+;?/g, '');
					});
					// reset 'current' path classes
					o.$path.removeClass(o.hoverClass + ' ' + c.bcClass).addClass(o.pathClass);
					$this.find('.' + o.hoverClass).removeClass(o.hoverClass);
					o.onDestroy.call($this);
					$this.removeData('sf-options');
				});
			},
			init: function (op) {
				if ($.fn.superfish.defaults.fakeBg) {
					$(window).on('resize', resizeBg);
					$(window).on('orientationchange', resizeBg);
					$('.sub-menu').prepend('<div class="fake-bg"></div>');
					resizeBg();
				}
				return this.each(function () {
					var $this = $(this);
					if ($this.data('sf-options')) {
						return false;
					}
					var o = $.extend({}, $.fn.superfish.defaults, op),
						$hasPopUp = $this.find(o.popUpSelector).parent('li');
					o.$path = setPathToCurrent($this, o);

					$this.data('sf-options', o);

					toggleMenuClasses($this, o);
					toggleAnchorClass($hasPopUp);
					toggleTouchAction($this);
					applyHandlers($this, o);

					$hasPopUp.not('.' + c.bcClass).superfish('hide', true);

					o.onInit.call(this);
				});
			}
		};
	})();

	$.fn.superfish = function (method, args) {
		if (methods[method]) {
			return methods[method].apply(this, Array.prototype.slice.call(arguments, 1));
		}
		else if (typeof method === 'object' || !method) {
			return methods.init.apply(this, arguments);
		}
		else {
			return $.error('Method ' + method + ' does not exist on jQuery.fn.superfish');
		}
	};

	$.fn.superfish.defaults = {
		popUpSelector: 'ul,.sf-mega', // within menus context
		// hoverClass: 'sfHover',
		pathClass: 'overrideThisToUse',
		fakeBg: false,
		fakeBgLimiter: '.page',
		pathLevels: 1,
		delay: 800,
		animation: {height: 'show'},
		animationOut: {height: 'hide'},
		speed: 'normal',
		speedOut: 'fast',
		cssArrows: true,
		disableHI: false,
		onInit: $.noop,
		onBeforeShow: $.noop,
		onShow: $.noop,
		onBeforeHide: $.noop,
		onHide: $.noop,
		onIdle: $.noop,
		onDestroy: $.noop
	};

	// soon to be deprecated
	$.fn.extend({
		hideSuperfishUl: methods.hide,
		showSuperfishUl: methods.show
	});

})(jQuery, window);

$(window).load(function () {
	$('.sf-menu').superfish();
});
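A hedged initialization example using options documented in $.fn.superfish.defaults above. Note that hoverClass is commented out of this build's defaults, so it is passed explicitly here; the selector and values are illustrative:

jQuery(function ($) {
	$('ul.sf-menu').superfish({
		hoverClass: 'sfHover',   // defaults leave this commented out, so supply one
		delay: 600,              // ms before a submenu starts closing on mouseleave
		speed: 'fast',           // submenu reveal animation speed
		cssArrows: true          // keep the sf-arrows class for CSS arrow styling
	});
});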
import axios from 'axios'

export default function useHttp(baseURL) {
  const defaultHeaders = {
    'Content-Type': 'application/json',
    'X-Requested-With': 'XMLHttpRequest'
  }

  const call = (url, method, options = {}) => {
    const data = options.data || {}
    const config = options.config || {}
    const headers = options.headers || {}

    const promise = axios({
      url: url,
      method: method,
      baseURL: baseURL,
      data: data,
      ...config,
      headers: { ...defaultHeaders, ...headers },
    })

    if (options['wantsRawResponse']) {
      return promise;
    }

    return (
      promise
        .then(response => {
          return response.data
        })
        .catch(error => {
          console.log(`Fetch error: ${error}`)
          return error.response ? error.response : error.message
        })
    )
  }

  const get = (url, options = {}) => call(url, 'get', options)
  const post = (url, data, options = {}) => call(url, 'post', {...{data: data}, ...options})

  return { call, get, post }
}
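A usage sketch for the hook above; the module path, base URL, and endpoints are illustrative:

import useHttp from './useHttp'  // hypothetical module path

const { get, post } = useHttp('https://api.example.com')

// get/post resolve with response.data on success; on failure the catch
// branch above resolves with error.response (or error.message).
async function loadUsers() {
  return get('/users')
}

post('/users', { name: 'Ada' })

// Pass { wantsRawResponse: true } to receive the raw axios promise instead
// of the unwrapped response.data.
const raw = get('/users', { wantsRawResponse: true })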
define({"topics" : [{"title":"Multihead Ingest","href":"datacollector\/UserGuide\/Destinations\/KineticaDB.html#concept_jx1_25h_sbb","attributes": {"data-id":"concept_jx1_25h_sbb",},"menu": {"hasChildren":false,},"tocID":"concept_jx1_25h_sbb-d46e123188","topics":[]},{"title":"Inserts and Updates","href":"datacollector\/UserGuide\/Destinations\/KineticaDB.html#concept_jhz_bwc_rbb","attributes": {"data-id":"concept_jhz_bwc_rbb",},"menu": {"hasChildren":false,},"tocID":"concept_jhz_bwc_rbb-d46e123210","topics":[]},{"title":"Configuring a KineticaDB Destination","href":"datacollector\/UserGuide\/Destinations\/KineticaDB.html#task_r1q_vxg_qbb","attributes": {"data-id":"task_r1q_vxg_qbb",},"menu": {"hasChildren":false,},"tocID":"task_r1q_vxg_qbb-d46e123242","topics":[]}]});
(function(t){const e=t["zh-cn"]=t["zh-cn"]||{};e.dictionary=Object.assign(e.dictionary||{},{"%0 of %1":"第 %0 步,共 %1 步","Align cell text to the bottom":"使单元格文本对齐到底部","Align cell text to the center":"使单元格文本水平居中","Align cell text to the left":"使单元格文本左对齐","Align cell text to the middle":"使单元格文本垂直居中","Align cell text to the right":"使单元格文本右对齐","Align cell text to the top":"使单元格文本对齐到顶部","Align center":"居中对齐","Align left":"左对齐","Align right":"右对齐","Align table to the left":"使表格左对齐","Align table to the right":"使表格右对齐",Alignment:"对齐","Almost equal to":"约等于",Angle:"角","Approximately equal to":"近似等于",Aquamarine:"海蓝色","Asterisk operator":"星号运算符","Austral sign":"澳大利亚货币符号","back with leftwards arrow above":"带有back标识的向左箭头",Background:"背景",Big:"大","Bitcoin sign":"比特币符号",Black:"黑色","Block quote":"块引用",Blue:"蓝色",Bold:"加粗",Border:"边框","Break text":"","Bulleted List":"项目符号列表","Bulleted list styles toolbar":"项目符号列表样式工具条",Cancel:"取消","Cedi sign":"塞地符号","Cell properties":"单元格属性","Cent sign":"分币符号","Center table":"表格居中","Centered image":"图片居中","Change image text alternative":"更改图片替换文本","Character categories":"字符类别","Choose heading":"标题类型",Circle:"空心圆点","Colon sign":"科朗符号",Color:"颜色","Color picker":"颜色选择器",Column:"列","Contains as member":"包含","Copyright sign":"版权符号","Cruzeiro sign":"克鲁塞罗符号","Currency sign":"货币符号",Dashed:"虚线",Decimal:"阿拉伯数字","Decimal with leading zero":"前导零阿拉伯数字","Decrease indent":"减少缩进",Default:"默认","Degree sign":"度数符号","Delete column":"删除本列","Delete row":"删除本行","Dim grey":"暗灰色",Dimensions:"尺寸",Disc:"实心圆点","Division sign":"除号","Document colors":"文档中的颜色","Dollar sign":"美元符号","Dong sign":"越南盾符号",Dotted:"点状虚线",Double:"双线","Double dagger":"双剑号","Double exclamation mark":"双叹号","Double low-9 quotation mark":"低位后双引号","Double question mark":"双问号",Downloadable:"可下载","downwards arrow to bar":"头部带杠的向下箭头","downwards dashed arrow":"向下虚线箭头","downwards double arrow":"向下双箭头","Drachma sign":"德拉克马符号","Dropdown toolbar":"下拉工具栏","Edit block":"编辑框","Edit link":"修改链接","Edit source":"编辑源代码","Editor toolbar":"编辑器工具栏","Element of":"属于","Em dash":"长破折号","Empty set":"空集","Empty snippet content":"","En dash":"短破折号","end with leftwards arrow above":"带有end标识的向左箭头","Enter image caption":"输入图片标题","Enter table caption":"","Euro sign":"欧元符号","Euro-currency sign":"欧元货币符号","Exclamation question mark":"感叹疑问号","Font Background Color":"字体背景色","Font Color":"字体颜色","Font Size":"字体大小","For all":"对于全部","Fraction slash":"分数斜线","French franc sign":"法国法郎符号","Full size image":"图片通栏显示","German penny sign":"德国便士符号","Greater-than or equal to":"大于等于","Greater-than sign":"大于号",Green:"绿色",Grey:"灰色",Groove:"凹槽边框","Guarani sign":"瓜拉尼货币符号","Header column":"标题列","Header row":"标题行",Heading:"标题","Heading 1":"标题 1","Heading 2":"标题 2","Heading 3":"标题 3","Heading 4":"标题 4","Heading 5":"标题 5","Heading 6":"标题 6",Height:"高度","Horizontal ellipsis":"省略号","Horizontal line":"水平线","Horizontal text alignment toolbar":"水平文本对齐工具栏","Hryvnia sign":"戈里夫纳符号","HTML snippet":"HTML 代码片段",Huge:"极大","Identical to":"恒等于","Image resize list":"图片大小列表","Image toolbar":"图片工具栏","image widget":"图像小部件","In line":"","Increase indent":"增加缩进","Indian rupee sign":"印度卢比符号",Infinity:"无穷大",Insert:"插入","Insert code block":"插入代码块","Insert column left":"左侧插入列","Insert column right":"右侧插入列","Insert HTML":"插入 HTML","Insert image":"插入图像","Insert image via URL":"通过URL地址插入图片","Insert media":"插入媒体","Insert paragraph after block":"在后面插入段落","Insert paragraph before block":"在前面插入段落","Insert row above":"在上面插入一行","Insert row below":"在下面插入一行","Insert 
table":"插入表格",Inset:"凹边框",Integral:"积分",Intersection:"交集","Inverted exclamation mark":"反感叹号","Inverted question mark":"反问号",Italic:"倾斜",Justify:"两端对齐","Justify cell text":"对齐单元格文本","Kip sign":" 基普符号","Latin capital letter a with breve":"带短音符的大写拉丁字母a","Latin capital letter a with macron":"带长音符的大写拉丁字母a","Latin capital letter a with ogonek":"带反尾形符的大写拉丁字母a","Latin capital letter c with acute":"带锐音符的大写拉丁字母c","Latin capital letter c with caron":"带抑扬符的大写拉丁字母c","Latin capital letter c with circumflex":"带扬抑符的大写拉丁字母c","Latin capital letter c with dot above":"带上点的大写拉丁字母c","Latin capital letter d with caron":"带抑扬符的大写拉丁字母d","Latin capital letter d with stroke":"带删节线的大写拉丁字母d","Latin capital letter e with breve":"带短音符的大写拉丁字母e","Latin capital letter e with caron":"带抑扬符的大写拉丁字母e","Latin capital letter e with dot above":"带上点的大写拉丁字母e","Latin capital letter e with macron":"带长音符的大写拉丁字母e","Latin capital letter e with ogonek":"带反尾形符的大写拉丁字母e","Latin capital letter eng":"大写拉丁字母eng","Latin capital letter g with breve":"带短音符的大写拉丁字母g","Latin capital letter g with cedilla":"带软音符的大写拉丁字母g","Latin capital letter g with circumflex":"带扬抑符的大写拉丁字母g","Latin capital letter g with dot above":"带上点的大写拉丁字母g","Latin capital letter h with circumflex":"带扬抑符的大写拉丁字母h","Latin capital letter h with stroke":"带删节线的大写拉丁字母h","Latin capital letter i with breve":"带短音符的大写拉丁字母i","Latin capital letter i with dot above":"带上点的大写拉丁字母i","Latin capital letter i with macron":"带长音符的大写拉丁字母i","Latin capital letter i with ogonek":"带反尾形符的大写拉丁字母i","Latin capital letter i with tilde":"带腭化符的大写拉丁字母i","Latin capital letter j with circumflex":"带扬抑符的大写拉丁字母j","Latin capital letter k with cedilla":"带软音符的大写拉丁字母k","Latin capital letter l with acute":"带锐音符的大写拉丁字母l","Latin capital letter l with caron":"带抑扬符的大写拉丁字母l","Latin capital letter l with cedilla":"带软音符的大写拉丁字母l","Latin capital letter l with middle dot":"带中点的大写拉丁字母l","Latin capital letter l with stroke":"带删节线的大写拉丁字母l","Latin capital letter n with acute":"带锐音符的大写拉丁字母n","Latin capital letter n with caron":"带抑扬符的大写拉丁字母n","Latin capital letter n with cedilla":"带软音符的大写拉丁字母n","Latin capital letter o with breve":"带短音符的大写拉丁字母o","Latin capital letter o with double acute":"带双锐音符的大写拉丁字母o","Latin capital letter o with macron":"带长音符的大写拉丁字母o","Latin capital letter r with acute":"带锐音符的大写拉丁字母r","Latin capital letter r with caron":"带抑扬符的大写拉丁字母r","Latin capital letter r with cedilla":"带软音符的大写拉丁字母r","Latin capital letter s with acute":"带锐音符的大写拉丁字母s","Latin capital letter s with caron":"带抑扬符的大写拉丁字母s","Latin capital letter s with cedilla":"带软音符的大写拉丁字母s","Latin capital letter s with circumflex":"带扬抑符的大写拉丁字母s","Latin capital letter t with caron":"带抑扬符的大写拉丁字母t","Latin capital letter t with cedilla":"带软音符的大写拉丁字母t","Latin capital letter t with stroke":"带删节线的大写拉丁字母t","Latin capital letter u with breve":"带短音符的大写拉丁字母u","Latin capital letter u with double acute":"带双锐音符的大写拉丁字母u","Latin capital letter u with macron":"带长音符的大写拉丁字母u","Latin capital letter u with ogonek":"带反尾形符的大写拉丁字母u","Latin capital letter u with ring above":"带上圆圈的大写拉丁字母u","Latin capital letter u with tilde":"带腭化符的大写拉丁字母u","Latin capital letter w with circumflex":"带扬抑符的大写拉丁字母w","Latin capital letter y with circumflex":"带扬抑符的大写拉丁字母y","Latin capital letter y with diaeresis":"带分音符的大写拉丁字母y","Latin capital letter z with acute":"带锐音符的大写拉丁字母z","Latin capital letter z with caron":"带抑扬符的大写拉丁字母z","Latin capital letter z with dot above":"带上点的大写拉丁字母z","Latin capital ligature ij":"大写拉丁连字符ij","Latin capital ligature oe":"大写拉丁连字符oe","Latin small letter a with breve":"带短音符的小写拉丁字母a","Latin small 
letter a with macron":"带长音符的小写拉丁字母a","Latin small letter a with ogonek":"带反尾形符的小写拉丁字母a","Latin small letter c with acute":"带锐音符的小写拉丁字母c","Latin small letter c with caron":"带抑扬符的小写拉丁字母c","Latin small letter c with circumflex":"带扬抑符的小写拉丁字母c","Latin small letter c with dot above":"带上点的小写拉丁字母c","Latin small letter d with caron":"带抑扬符的小写拉丁字母d","Latin small letter d with stroke":"带删节线的小写拉丁字母d","Latin small letter dotless i":"没有点的小写拉丁字母i","Latin small letter e with breve":"带短音符的小写拉丁字母e","Latin small letter e with caron":"带抑扬符的小写拉丁字母e","Latin small letter e with dot above":"带上点的小写拉丁字母e","Latin small letter e with macron":"带长音符的小写拉丁字母e","Latin small letter e with ogonek":"带反尾形符的小写拉丁字母e","Latin small letter eng":"小写拉丁字母eng","Latin small letter f with hook":"带钩的拉丁文小写字母 F","Latin small letter g with breve":"带短音符的小写拉丁字母g","Latin small letter g with cedilla":"带软音符的小写拉丁字母g","Latin small letter g with circumflex":"带扬抑符的小写拉丁字母g","Latin small letter g with dot above":"带上点的小写拉丁字母g","Latin small letter h with circumflex":"带扬抑符的小写拉丁字母h","Latin small letter h with stroke":"带删节线的小写拉丁字母h","Latin small letter i with breve":"带短音符的小写拉丁字母i","Latin small letter i with macron":"带长音符的小写拉丁字母i","Latin small letter i with ogonek":"带反尾形符的小写拉丁字母i","Latin small letter i with tilde":"带腭化符的小写拉丁字母i","Latin small letter j with circumflex":"带扬抑符的小写拉丁字母j","Latin small letter k with cedilla":"带软音符的小写拉丁字母k","Latin small letter kra":"小写拉丁字母kra","Latin small letter l with acute":"带锐音符的小写拉丁字母l","Latin small letter l with caron":"带抑扬符的小写拉丁字母l","Latin small letter l with cedilla":"带软音符的小写拉丁字母l","Latin small letter l with middle dot":"带中点的小写拉丁字母l","Latin small letter l with stroke":"带删节线的小写拉丁字母l","Latin small letter long s":"小写拉丁字母长s","Latin small letter n preceded by apostrophe":"冠以撇号的小写拉丁字母n","Latin small letter n with acute":"带锐音符的小写拉丁字母n","Latin small letter n with caron":"带抑扬符的小写拉丁字母n","Latin small letter n with cedilla":"带软音符的小写拉丁字母n","Latin small letter o with breve":"带短音符的小写拉丁字母o","Latin small letter o with double acute":"带双锐音符的小写拉丁字母o","Latin small letter o with macron":"带长音符的小写拉丁字母o","Latin small letter r with acute":"带锐音符的小写拉丁字母r","Latin small letter r with caron":"带抑扬符的小写拉丁字母r","Latin small letter r with cedilla":"带软音符的小写拉丁字母r","Latin small letter s with acute":"带锐音符的小写拉丁字母s","Latin small letter s with caron":"带抑扬符的小写拉丁字母s","Latin small letter s with cedilla":"带软音符的小写拉丁字母s","Latin small letter s with circumflex":"带扬抑符的小写拉丁字母s","Latin small letter t with caron":"带抑扬符的小写拉丁字母t","Latin small letter t with cedilla":"带软音符的小写拉丁字母t","Latin small letter t with stroke":"带删节线的小写拉丁字母t","Latin small letter u with breve":"带短音符的小写拉丁字母u","Latin small letter u with double acute":"带双锐音符的小写拉丁字母u","Latin small letter u with macron":"带长音符的小写拉丁字母u","Latin small letter u with ogonek":"带反尾形符的小写拉丁字母u","Latin small letter u with ring above":"带上圆圈的小写拉丁字母u","Latin small letter u with tilde":"带腭化符的小写拉丁字母u","Latin small letter w with circumflex":"带扬抑符的小写拉丁字母w","Latin small letter y with circumflex":"带扬抑符的小写拉丁字母y","Latin small letter z with acute":"带锐音符的小写拉丁字母z","Latin small letter z with caron":"带抑扬符的小写拉丁字母z","Latin small letter z with dot above":"带上点的小写拉丁字母z","Latin small ligature ij":"小写拉丁连字符ij","Latin small ligature oe":"小写拉丁连字符oe","Left aligned image":"图片左侧对齐","Left double quotation mark":"左双引号","Left single quotation mark":"左单引号","Left-pointing double angle quotation mark":"双左尖括号","leftwards arrow to bar":"头部带杠的向左箭头","leftwards dashed arrow":"向左虚线箭头","leftwards double arrow":"向左双箭头","Less-than or equal to":"小于等于","Less-than sign":"小于号","Light 
blue":"浅蓝色","Light green":"浅绿色","Light grey":"浅灰色",Link:"超链接","Link image":"链接图片","Link URL":"链接网址","Lira sign":"里拉符号","Livre tournois sign":"里弗尔符号","Logical and":"逻辑与","Logical or":"逻辑或","Lower-latin":"小写拉丁字母","Lower–roman":"小写罗马数字",Macron:"长音符号","Manat sign":"马纳特符号","Media toolbar":"媒体工具栏","Media URL":"媒体URL","media widget":"媒体小部件","Merge cell down":"向下合并单元格","Merge cell left":"向左合并单元格","Merge cell right":"向右合并单元格","Merge cell up":"向上合并单元格","Merge cells":"合并单元格","Mill sign":"密尔符号","Minus sign":"负号","Multiplication sign":"称号","N-ary product":"N 元乘积","N-ary summation":"N 元求和",Nabla:"劈形算符","Naira sign":"奈拉符号","New sheqel sign":"新谢克尔符号",Next:"下一步","No preview available":"",None:"无","Nordic mark sign":"北欧马克征符号","Not an element of":"不属于","Not equal to":"不等于","Not sign":"非","Numbered List":"项目编号列表","Numbered list styles toolbar":"项目编号列表样式工具条","on with exclamation mark with left right arrow above":"带有NO!标识的左右双向箭头","Open in a new tab":"在新标签页中打开","Open link in new tab":"在新标签页中打开链接",Orange:"橙色",Original:"原始大小",Outset:"凸边框",Overline:"上划线",Padding:"内边距",Paragraph:"段落","Paragraph sign":"段落符号","Partial differential":"偏微分","Paste raw HTML here...":"在这里粘贴 HTML 源代码","Paste the media URL in the input.":"在输入中粘贴媒体URL","Per mille sign":"千分号","Per ten thousand sign":"万分号","Peseta sign":"比塞塔符号","Peso sign":"比索符号","Plain text":"纯文本","Plus-minus sign":"正负号","Pound sign":"英镑符号",Previous:"上一步","Proportional to":"比例",Purple:"紫色","Question exclamation mark":"疑问感叹号",Red:"红色",Redo:"重做","Registered sign":"注册商标","Remove color":"移除颜色","Remove Format":"移除格式","Resize image":"调整图像大小","Resize image to %0":"调整图像大小为0%","Resize image to the original size":"调整图像大小为原始大小","Restore default":"","Reversed paragraph sign":"反向段落符号","Rich Text Editor":"富文本编辑器","Rich Text Editor, %0":"富文本编辑器, %0",Ridge:"垄状边框","Right aligned image":"图片右侧对齐","Right double quotation mark":"右双引号","Right single quotation mark":"右单引号","Right-pointing double angle quotation mark":"双右尖括号","rightwards arrow to bar":"头部带杠的向右箭头","rightwards dashed arrow":"向右虚线箭头","rightwards double arrow":"向右双箭头",Row:"行","Ruble sign":"俄罗斯卢布","Rupee sign":"卢比符号",Save:"保存","Save changes":"保存更改","Section sign":"节标记","Select all":"全选","Select column":"选择列","Select row":"选择行","Show more items":"显示更多","Side image":"图片侧边显示","Single left-pointing angle quotation mark":"单左尖括号","Single low-9 quotation mark":"低位后单引号","Single right-pointing angle quotation mark":"单右尖括号",Small:"小",Solid:"实线","soon with rightwards arrow above":"带有soon标识的向右箭头","Special characters":"特殊字符","Spesmilo sign":"斯佩斯米洛符号","Split cell horizontally":"横向拆分单元格","Split cell vertically":"纵向拆分单元格",Square:"实心方块","Square root":"平方根",Strikethrough:"删除线",Style:"样式",Subscript:"下标",Superscript:"上标","Table alignment toolbar":"表格对齐工具栏","Table cell text alignment":"表格单元格中的文本水平对齐","Table properties":"表格属性","Table toolbar":"表格工具栏","Tenge sign":"坚戈符号","Text alignment":"对齐","Text alignment toolbar":"对齐工具栏","Text alternative":"替换文本",'The color is invalid. Try "#FF0000" or "rgb(255,0,0)" or "red".':'颜色无效。尝试使用"#FF0000"、"rgb(255,0,0)"或者"red"。',"The URL must not be empty.":"URL不可以为空。",'The value is invalid. 
Try "10px" or "2em" or simply "2".':"无效值。尝试使用“10px”、“2ex”或者只写“2”。","There exists":"存在","This link has no URL":"此链接没有设置网址","This media URL is not supported.":"不支持此媒体URL。","Tilde operator":"波浪线运算符",Tiny:"极小","Tip: Paste the URL into the content to embed faster.":"提示:将URL粘贴到内容中可更快地嵌入","To-do List":"待办列表","Toggle caption off":"","Toggle caption on":"","Toggle the circle list style":"切换空心原点列表样式","Toggle the decimal list style":"切换阿拉伯数字列表样式","Toggle the decimal with leading zero list style":"切换前导零阿拉伯数字列表样式","Toggle the disc list style":"切换实心原点列表样式","Toggle the lower–latin list style":"切换小写拉丁字母列表样式","Toggle the lower–roman list style":"切换小写罗马数字列表样式","Toggle the square list style":"切换实心方块列表样式","Toggle the upper–latin list style":"切换大写拉丁字母列表样式","Toggle the upper–roman list style":"切换大写罗马数字列表样式","top with upwards arrow above":"带有top标识的向上箭头","Trade mark sign":"商标符号","Tugrik sign":"图格里克符号","Turkish lira sign":"土耳其里拉符号",Turquoise:"青色","Two dot leader":"二点前导符",Underline:"下划线",Undo:"撤销",Union:"并集",Unlink:"取消超链接","up down arrow with base":"处于基线的上下箭头",Update:"更新","Update image URL":"更新图片URL地址","Upload failed":"上传失败","Upload in progress":"正在上传","Upper-latin":"大写拉丁字母","Upper-roman":"大写罗马数字","upwards arrow to bar":"头部带杠的向上箭头","upwards dashed arrow":"向上虚线箭头","upwards double arrow":"向上双箭头","Vertical text alignment toolbar":"垂直文本对齐工具栏","Vulgar fraction one half":"普通分数二分之一","Vulgar fraction one quarter":"普通分数四分之一","Vulgar fraction three quarters":"普通分数四分之三",White:"白色","Widget toolbar":"小部件工具栏",Width:"宽度","Won sign":"韩元符号","Wrap text":"",Yellow:"黄色","Yen sign":"日元符号"});e.getPluralForm=function(t){return 0}})(window.CKEDITOR_TRANSLATIONS||(window.CKEDITOR_TRANSLATIONS={}));
import numpy as np import pytest import autoarray as aa from autoarray.inversion.mappers.voronoi import MapperVoronoiNoInterp from autoarray.inversion.mappers.delaunay import MapperDelaunay from autoarray.mock.mock import MockLinearObjFunc # TODO : Need to figure out how we blur linear light profile with blurring grid. def test__inversion_imaging__via_linear_obj_func(masked_imaging_7x7_no_blur): mask = masked_imaging_7x7_no_blur.mask linear_obj = MockLinearObjFunc( sub_slim_shape=mask.sub_shape_slim, sub_size=mask.sub_size, mapping_matrix=np.full(fill_value=0.5, shape=(9, 1)), ) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[linear_obj], settings=aa.SettingsInversion(use_w_tilde=False, check_solution=False), ) assert isinstance(inversion.linear_obj_list[0], MockLinearObjFunc) assert isinstance(inversion.leq, aa.LEqImagingMapping) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[linear_obj], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.linear_obj_list[0], MockLinearObjFunc) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) def test__inversion_imaging__via_mapper( masked_imaging_7x7_no_blur, rectangular_mapper_7x7_3x3, delaunay_mapper_9_3x3, voronoi_mapper_9_3x3, regularization_constant, ): inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[rectangular_mapper_7x7_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=False, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperRectangularNoInterp) assert isinstance(inversion.leq, aa.LEqImagingMapping) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(6.9546, 1.0e-4) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[rectangular_mapper_7x7_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperRectangularNoInterp) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(6.9546, 1.0e-4) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[delaunay_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=False, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperDelaunay) assert isinstance(inversion.leq, aa.LEqImagingMapping) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.6674, 1.0e-4) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[delaunay_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperDelaunay) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.6674, 1.0e-4) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4)
inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=False, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoiNoInterp) assert isinstance(inversion.leq, aa.LEqImagingMapping) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.6763, 1.0e-4) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoiNoInterp) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.6763, 1.0e-4) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) def test__inversion_imaging__via_regularizations( masked_imaging_7x7_no_blur, delaunay_mapper_9_3x3, voronoi_mapper_9_3x3, voronoi_mapper_nn_9_3x3, regularization_constant, regularization_constant_split, regularization_adaptive_brightness, regularization_adaptive_brightness_split, ): inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[delaunay_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperDelaunay) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 10.66747, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[delaunay_mapper_9_3x3], regularization_list=[regularization_constant_split], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperDelaunay) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 10.52745, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[delaunay_mapper_9_3x3], regularization_list=[regularization_adaptive_brightness], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperDelaunay) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 47.410169, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[delaunay_mapper_9_3x3], regularization_list=[regularization_adaptive_brightness_split], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperDelaunay) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 38.956734, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_constant], 
settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoiNoInterp) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.6763, 1.0e-4) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_constant_split], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoiNoInterp) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 10.38417, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_adaptive_brightness], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoiNoInterp) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( -25.71476, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_adaptive_brightness_split], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoiNoInterp) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( -26.31747, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_nn_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoi) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 10.66505, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_nn_9_3x3], regularization_list=[regularization_constant_split], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoi) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 10.37955, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[voronoi_mapper_nn_9_3x3], regularization_list=[regularization_adaptive_brightness], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoi) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 49.63744, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, 
linear_obj_list=[voronoi_mapper_nn_9_3x3], regularization_list=[regularization_adaptive_brightness_split], settings=aa.SettingsInversion(use_w_tilde=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoi) assert isinstance(inversion.leq, aa.LEqImagingWTilde) assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 34.90782, 1.0e-4 ) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) def test__inversion_imaging__compare_mapping_and_w_tilde_values( masked_imaging_7x7, voronoi_mapper_9_3x3, regularization_constant ): inversion_w_tilde = aa.Inversion( dataset=masked_imaging_7x7, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=True), ) inversion_mapping = aa.Inversion( dataset=masked_imaging_7x7, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=False), ) assert inversion_w_tilde.reconstruction == pytest.approx( inversion_mapping.reconstruction, 1.0e-4 ) assert inversion_w_tilde.mapped_reconstructed_image == pytest.approx( inversion_mapping.mapped_reconstructed_image, 1.0e-4 ) assert ( inversion_w_tilde.log_det_curvature_reg_matrix_term == inversion_mapping.log_det_curvature_reg_matrix_term ) def test__inversion_interferometer__via_mapper( interferometer_7_no_fft, rectangular_mapper_7x7_3x3, delaunay_mapper_9_3x3, voronoi_mapper_9_3x3, regularization_constant, ): inversion = aa.Inversion( dataset=interferometer_7_no_fft, linear_obj_list=[rectangular_mapper_7x7_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=False, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperRectangularNoInterp) assert isinstance(inversion.leq, aa.LEqInterferometerMapping) assert inversion.mapped_reconstructed_data == pytest.approx( 1.0 + 0.0j * np.ones(shape=(7,)), 1.0e-4 ) assert (np.imag(inversion.mapped_reconstructed_data) < 0.0001).all() assert (np.imag(inversion.mapped_reconstructed_data) > 0.0).all() assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(10.2116, 1.0e-4) inversion = aa.Inversion( dataset=interferometer_7_no_fft, linear_obj_list=[delaunay_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=False, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperDelaunay) assert isinstance(inversion.leq, aa.LEqInterferometerMapping) assert inversion.mapped_reconstructed_data == pytest.approx( 1.0 + 0.0j * np.ones(shape=(7,)), 1.0e-4 ) assert (np.imag(inversion.mapped_reconstructed_data) < 0.0001).all() assert (np.imag(inversion.mapped_reconstructed_data) > 0.0).all() assert inversion.log_det_curvature_reg_matrix_term == pytest.approx( 14.49772, 1.0e-4 ) inversion = aa.Inversion( dataset=interferometer_7_no_fft, linear_obj_list=[voronoi_mapper_9_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_w_tilde=False, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperVoronoiNoInterp) assert isinstance(inversion.leq, aa.LEqInterferometerMapping) assert inversion.mapped_reconstructed_data == pytest.approx( 1.0 + 0.0j * np.ones(shape=(7,)), 1.0e-4 ) assert (np.imag(inversion.mapped_reconstructed_data) < 0.0001).all() assert (np.imag(inversion.mapped_reconstructed_data) > 0.0).all() assert inversion.log_det_curvature_reg_matrix_term == pytest.approx(14.4977, 1.0e-4) 
inversion = aa.Inversion( dataset=interferometer_7_no_fft, linear_obj_list=[rectangular_mapper_7x7_3x3], regularization_list=[regularization_constant], settings=aa.SettingsInversion(use_linear_operators=True, check_solution=False), ) assert isinstance(inversion.mapper_list[0], aa.MapperRectangularNoInterp) assert isinstance(inversion.leq, aa.LEqInterferometerMappingPyLops) def test__inversion_matrices__x2_mappers( masked_imaging_7x7_no_blur, rectangular_mapper_7x7_3x3, voronoi_mapper_9_3x3, regularization_constant, ): inversion = aa.Inversion( dataset=masked_imaging_7x7_no_blur, linear_obj_list=[rectangular_mapper_7x7_3x3, voronoi_mapper_9_3x3], regularization_list=[regularization_constant, regularization_constant], settings=aa.SettingsInversion(check_solution=False), ) assert ( inversion.operated_mapping_matrix[0:9, 0:9] == rectangular_mapper_7x7_3x3.mapping_matrix ).all() assert ( inversion.operated_mapping_matrix[0:9, 9:18] == voronoi_mapper_9_3x3.mapping_matrix ).all() blurred_mapping_matrix = np.hstack( [rectangular_mapper_7x7_3x3.mapping_matrix, voronoi_mapper_9_3x3.mapping_matrix] ) assert inversion.operated_mapping_matrix == pytest.approx( blurred_mapping_matrix, 1.0e-4 ) curvature_matrix = aa.util.leq.curvature_matrix_via_mapping_matrix_from( mapping_matrix=blurred_mapping_matrix, noise_map=inversion.noise_map ) assert inversion.curvature_matrix == pytest.approx(curvature_matrix, 1.0e-4) regularization_matrix_of_reg_0 = regularization_constant.regularization_matrix_from( mapper=rectangular_mapper_7x7_3x3 ) regularization_matrix_of_reg_1 = regularization_constant.regularization_matrix_from( mapper=voronoi_mapper_9_3x3 ) assert ( inversion.regularization_matrix[0:9, 0:9] == regularization_matrix_of_reg_0 ).all() assert ( inversion.regularization_matrix[9:18, 9:18] == regularization_matrix_of_reg_1 ).all() assert (inversion.regularization_matrix[0:9, 9:18] == np.zeros((9, 9))).all() assert (inversion.regularization_matrix[9:18, 0:9] == np.zeros((9, 9))).all() reconstruction_0 = 0.5 * np.ones(9) reconstruction_1 = 0.5 * np.ones(9) assert inversion.reconstruction_dict[rectangular_mapper_7x7_3x3] == pytest.approx( reconstruction_0, 1.0e-4 ) assert inversion.reconstruction_dict[voronoi_mapper_9_3x3] == pytest.approx( reconstruction_1, 1.0e-4 ) assert inversion.reconstruction == pytest.approx( np.concatenate([reconstruction_0, reconstruction_1]), 1.0e-4 ) assert inversion.mapped_reconstructed_data_dict[ rectangular_mapper_7x7_3x3 ] == pytest.approx(0.5 * np.ones(9), 1.0e-4) assert inversion.mapped_reconstructed_data_dict[ voronoi_mapper_9_3x3 ] == pytest.approx(0.5 * np.ones(9), 1.0e-4) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4)
__________________________________________________________________________________________________
sample 40 ms submission

from typing import List

class Solution:
    def numSpecialEquivGroups(self, A: List[str]) -> int:
        def count(A):
            # 52 buckets: 26 for even-indexed letters, 26 for odd-indexed letters.
            ans = [0] * 52
            for i, letter in enumerate(A):
                ans[ord(letter) - ord('a') + 26 * (i % 2)] += 1
            return tuple(ans)

        return len({count(word) for word in A})
__________________________________________________________________________________________________
sample 13208 kb submission

from typing import List

class Solution:
    def numSpecialEquivGroups(self, A: List[str]) -> int:
        # Canonical form: sort the even-indexed and odd-indexed characters separately.
        temp = ["".join(sorted(a[::2]) + sorted(a[1::2])) for a in A]
        return len(set(temp))
__________________________________________________________________________________________________
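# Both submissions rely on the same invariant: two strings are special-equivalent
# exactly when their even-indexed and their odd-indexed characters agree as
# multisets. A quick standalone check of that signature, using the word list from
# the problem's published example:

def signature(word: str) -> str:
    # Sorted even-indexed chars + sorted odd-indexed chars identify a group.
    return "".join(sorted(word[::2]) + sorted(word[1::2]))

words = ["abcd", "cdab", "cbad", "xyzz", "zzxy", "zzyx"]
print(len({signature(w) for w in words}))  # -> 3 groups, matching the expected answer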
/*! * * Super simple wysiwyg editor v0.8.18 * https://summernote.org * * * Copyright 2013- Alan Hong. and other contributors * summernote may be freely distributed under the MIT license. * * Date: 2020-05-20T16:47Z * */ (function webpackUniversalModuleDefinition(root, factory) { if(typeof exports === 'object' && typeof module === 'object') module.exports = factory(); else if(typeof define === 'function' && define.amd) define([], factory); else { var a = factory(); for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i]; } })(window, function() { return /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, { enumerable: true, get: getter }); /******/ } /******/ }; /******/ /******/ // define __esModule on exports /******/ __webpack_require__.r = function(exports) { /******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) { /******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' }); /******/ } /******/ Object.defineProperty(exports, '__esModule', { value: true }); /******/ }; /******/ /******/ // create a fake namespace object /******/ // mode & 1: value is a module id, require it /******/ // mode & 2: merge all properties of value into the ns /******/ // mode & 4: return value when already ns object /******/ // mode & 8|1: behave like require /******/ __webpack_require__.t = function(value, mode) { /******/ if(mode & 1) value = __webpack_require__(value); /******/ if(mode & 8) return value; /******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value; /******/ var ns = Object.create(null); /******/ __webpack_require__.r(ns); /******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value }); /******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key)); /******/ return ns; /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? 
/******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; /******/ /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 43); /******/ }) /************************************************************************/ /******/ ({ /***/ 43: /***/ (function(module, exports) { (function ($) { $.extend($.summernote.lang, { 'ta-IN': { font: { bold: 'தடித்த', italic: 'சாய்வு', underline: 'அடிக்கோடு', clear: 'நீக்கு', height: 'வரி உயரம்', name: 'எழுத்துரு பெயர்', strikethrough: 'குறுக்குக் கோடு', size: 'எழுத்துரு அளவு', superscript: 'மேல் ஒட்டு', subscript: 'கீழ் ஒட்டு' }, image: { image: 'படம்', insert: 'படத்தை செருகு', resizeFull: 'முழு அளவை', resizeHalf: 'அரை அளவை', resizeQuarter: 'கால் அளவை', floatLeft: 'இடப்பக்கமாக வை', floatRight: 'வலப்பக்கமாக வை', floatNone: 'இயல்புநிலையில் வை', shapeRounded: 'வட்டமான வடிவம்', shapeCircle: 'வட்ட வடிவம்', shapeThumbnail: 'சிறு வடிவம்', shapeNone: 'வடிவத்தை நீக்கு', dragImageHere: 'படத்தை இங்கே இழுத்துவை', dropImage: 'படத்தை விடு', selectFromFiles: 'கோப்புகளை தேர்வு செய்', maximumFileSize: 'அதிகபட்ச கோப்பு அளவு', maximumFileSizeError: 'கோப்பு அதிகபட்ச அளவை மீறிவிட்டது', url: 'இணையதள முகவரி', remove: 'படத்தை நீக்கு', original: 'Original' }, video: { video: 'காணொளி', videoLink: 'காணொளி இணைப்பு', insert: 'காணொளியை செருகு', url: 'இணையதள முகவரி', providers: '(YouTube, Vimeo, Vine, Instagram, DailyMotion or Youku)' }, link: { link: 'இணைப்பு', insert: 'இணைப்பை செருகு', unlink: 'இணைப்பை நீக்கு', edit: 'இணைப்பை தொகு', textToDisplay: 'காட்சி வாசகம்', url: 'இணையதள முகவரி', openInNewWindow: 'புதிய சாளரத்தில் திறக்க' }, table: { table: 'அட்டவணை', addRowAbove: 'add row above', addRowBelow: 'add row below', addColLeft: 'add column left', addColRight: 'add column right', delRow: 'Delete row', delCol: 'Delete column', delTable: 'Delete table' }, hr: { insert: 'கிடைமட்ட கோடு' }, style: { style: 'தொகுப்பு', p: 'பத்தி', blockquote: 'மேற்கோள்', pre: 'குறியீடு', h1: 'தலைப்பு 1', h2: 'தலைப்பு 2', h3: 'தலைப்பு 3', h4: 'தலைப்பு 4', h5: 'தலைப்பு 5', h6: 'தலைப்பு 6' }, lists: { unordered: 'வரிசையிடாத', ordered: 'வரிசையிட்ட' }, options: { help: 'உதவி', fullscreen: 'முழுத்திரை', codeview: 'நிரலாக்க காட்சி' }, paragraph: { paragraph: 'பத்தி', outdent: 'வெளித்தள்ளு', indent: 'உள்ளே தள்ளு', left: 'இடது சீரமைப்பு', center: 'நடு சீரமைப்பு', right: 'வலது சீரமைப்பு', justify: 'இருபுற சீரமைப்பு' }, color: { recent: 'அண்மை நிறம்', more: 'மேலும்', background: 'பின்புல நிறம்', foreground: 'முன்புற நிறம்', transparent: 'தெளிமையான', setTransparent: 'தெளிமையாக்கு', reset: 'மீட்டமைக்க', resetToDefault: 'இயல்புநிலைக்கு மீட்டமை' }, shortcut: { shortcuts: 'குறுக்குவழி', close: 'மூடு', textFormatting: 'எழுத்து வடிவமைப்பு', action: 'செயல்படுத்து', paragraphFormatting: 'பத்தி வடிவமைப்பு', documentStyle: 'ஆவண பாணி', extraKeys: 'Extra keys' }, help: { 'insertParagraph': 'Insert Paragraph', 'undo': 'Undoes the last command', 'redo': 'Redoes the last command', 'tab': 'Tab', 'untab': 'Untab', 'bold': 'Set a bold style', 'italic': 'Set a italic style', 'underline': 'Set a underline style', 'strikethrough': 'Set a strikethrough style', 
'removeFormat': 'Clean a style', 'justifyLeft': 'Set left align', 'justifyCenter': 'Set center align', 'justifyRight': 'Set right align', 'justifyFull': 'Set full align', 'insertUnorderedList': 'Toggle unordered list', 'insertOrderedList': 'Toggle ordered list', 'outdent': 'Outdent on current paragraph', 'indent': 'Indent on current paragraph', 'formatPara': 'Change current block\'s format as a paragraph(P tag)', 'formatH1': 'Change current block\'s format as H1', 'formatH2': 'Change current block\'s format as H2', 'formatH3': 'Change current block\'s format as H3', 'formatH4': 'Change current block\'s format as H4', 'formatH5': 'Change current block\'s format as H5', 'formatH6': 'Change current block\'s format as H6', 'insertHorizontalRule': 'Insert horizontal rule', 'linkDialog.show': 'Show Link Dialog' }, history: { undo: 'மீளமை', redo: 'மீண்டும்' }, specialChar: { specialChar: 'SPECIAL CHARACTERS', select: 'Select Special characters' } } }); })(jQuery); /***/ }) /******/ }); });
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); const object_polyfill_1 = require("../../lib/object-polyfill"); function parseDataFormat2or4(data) { const dataFormat = data[0]; const humidity = data.readUInt8(1) * 0.5; const temperature = data.readInt8(2); const pressure = Math.round((data.readUInt16BE(4) + 50000) / 100); // hPa const ret = { dataFormat, humidity, temperature, pressure, }; if (dataFormat === 4 && data.length >= 6) { ret.beaconID = data[6]; } return ret; } exports.parseDataFormat2or4 = parseDataFormat2or4; function parseDataFormat3(data) { const dataFormat = data[0]; const humidity = data.readUInt8(1) * 0.5; const temperature = data.readInt8(2) + data[3] / 100; const pressure = Math.round((data.readUInt16BE(4) + 50000) / 100); // hPa const acceleration = { x: data.readInt16BE(6), y: data.readInt16BE(8), z: data.readInt16BE(10), }; const battery = data.readUInt16BE(12) / 1000; // mV -> V return { dataFormat, humidity, temperature, pressure, acceleration, battery, }; } exports.parseDataFormat3 = parseDataFormat3; function parseDataFormat5(data) { const dataFormat = data[0]; let temperature = data.readInt16BE(1); temperature = temperature !== 0x8000 ? temperature * 0.005 : undefined; let humidity = data.readUInt16BE(3); humidity = humidity !== 0xffff ? humidity * 0.0025 : undefined; let pressure = data.readUInt16BE(5); pressure = pressure !== 0xffff ? Math.round((pressure + 50000) / 100) : undefined; // hPa let acceleration = { x: data.readInt16BE(7), y: data.readInt16BE(9), z: data.readInt16BE(11), }; acceleration.x = acceleration.x !== 0x8000 ? acceleration.x * 0.001 : undefined; acceleration.y = acceleration.y !== 0x8000 ? acceleration.y * 0.001 : undefined; acceleration.z = acceleration.z !== 0x8000 ? acceleration.z * 0.001 : undefined; if (acceleration.x == undefined && acceleration.y == undefined && acceleration.z == undefined) acceleration = undefined; const power = data.readUInt16BE(13); let battery; let txPower; if ((power >>> 5) !== 2047) battery = (power >>> 5) * 0.001 + 1.6; if ((power & 0b11111) !== 0b11111) txPower = (power & 0b11111) * 2 - 40; let movementCounter = data[15]; if (movementCounter === 0xff) movementCounter = undefined; let sequenceNumber = data.readUInt16BE(16); if (sequenceNumber === 0xffff) sequenceNumber = undefined; let macAddress = data.slice(18, 24).toString("hex"); if (macAddress === "ffffffffffff") macAddress = undefined; return object_polyfill_1.stripUndefinedProperties({ dataFormat, temperature, humidity, pressure, acceleration, battery, txPower, movementCounter, sequenceNumber, macAddress, }); } exports.parseDataFormat5 = parseDataFormat5;
/* * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */ 'use strict'; /** * The response to a list metric definitions request. */ class MetricDefinitionsListResult extends Array { /** * Create a MetricDefinitionsListResult. */ constructor() { super(); } /** * Defines the metadata of MetricDefinitionsListResult * * @returns {object} metadata of MetricDefinitionsListResult * */ mapper() { return { required: false, serializedName: 'MetricDefinitionsListResult', type: { name: 'Composite', className: 'MetricDefinitionsListResult', modelProperties: { value: { required: false, readOnly: true, serializedName: '', type: { name: 'Sequence', element: { required: false, serializedName: 'MetricDefinitionElementType', type: { name: 'Composite', className: 'MetricDefinition' } } } } } } }; } } module.exports = MetricDefinitionsListResult;
import sys import torch from torch._C import _add_docstr, _special # type: ignore from torch._torch_docs import common_args # type: ignore Tensor = torch.Tensor entr = _add_docstr(_special.special_entr, r""" entr(input, *, out=None) -> Tensor Computes the entropy on :attr:`input` (as defined below), elementwise. .. math:: \begin{align} \text{entr(x)} = \begin{cases} -x * \ln(x) & x > 0 \\ 0 & x = 0.0 \\ -\infty & x < 0 \end{cases} \end{align} """ + """ Args: input (Tensor): the input tensor. Keyword args: out (Tensor, optional): the output tensor. Example:: >>> a = torch.arange(-0.5, 1, 0.5) >>> a tensor([-0.5000, 0.0000, 0.5000]) >>> torch.special.entr(a) tensor([ -inf, 0.0000, 0.3466]) """) gammaln = _add_docstr(_special.special_gammaln, r""" gammaln(input, *, out=None) -> Tensor Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. .. math:: \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|) """ + """ Args: {input} Keyword args: {out} Example:: >>> a = torch.arange(0.5, 2, 0.5) >>> torch.special.gammaln(a) tensor([ 0.5724, 0.0000, -0.1208]) """.format(**common_args)) erf = _add_docstr(_special.special_erf, r""" erf(input, *, out=None) -> Tensor Computes the error function of :attr:`input`. The error function is defined as follows: .. math:: \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.erf(torch.tensor([0, -1., 10.])) tensor([ 0.0000, -0.8427, 1.0000]) """.format(**common_args)) erfc = _add_docstr(_special.special_erfc, r""" erfc(input, *, out=None) -> Tensor Computes the complementary error function of :attr:`input`. The complementary error function is defined as follows: .. math:: \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.erfc(torch.tensor([0, -1., 10.])) tensor([ 1.0000, 1.8427, 0.0000]) """.format(**common_args)) erfinv = _add_docstr(_special.special_erfinv, r""" erfinv(input, *, out=None) -> Tensor Computes the inverse error function of :attr:`input`. The inverse error function is defined in the range :math:`(-1, 1)` as: .. math:: \mathrm{erfinv}(\mathrm{erf}(x)) = x """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.erfinv(torch.tensor([0, 0.5, -1.])) tensor([ 0.0000, 0.4769, -inf]) """.format(**common_args)) logit = _add_docstr(_special.special_logit, r""" logit(input, eps=None, *, out=None) -> Tensor Returns a new tensor with the logit of the elements of :attr:`input`. :attr:`input` is clamped to [eps, 1 - eps] when eps is not None. When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function will yield NaN. .. math:: \begin{align} y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\ z_{i} &= \begin{cases} x_{i} & \text{if eps is None} \\ \text{eps} & \text{if } x_{i} < \text{eps} \\ x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\ 1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps} \end{cases} \end{align} """ + r""" Args: {input} eps (float, optional): the epsilon for input clamp bound. Default: ``None`` Keyword args: {out} Example:: >>> a = torch.rand(5) >>> a tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516]) >>> torch.special.logit(a, eps=1e-6) tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261]) """.format(**common_args)) expit = _add_docstr(_special.special_expit, r""" expit(input, *, out=None) -> Tensor Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`. ..
math:: \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}} """ + r""" Args: {input} Keyword args: {out} Example:: >>> t = torch.randn(4) >>> t tensor([ 0.9213, 1.0887, -0.8858, -1.7683]) >>> torch.special.expit(t) tensor([ 0.7153, 0.7481, 0.2920, 0.1458]) """.format(**common_args)) exp2 = _add_docstr(_special.special_exp2, r""" exp2(input, *, out=None) -> Tensor Computes the base two exponential function of :attr:`input`. .. math:: y_{i} = 2^{x_{i}} """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4])) tensor([ 1., 2., 8., 16.]) """.format(**common_args)) expm1 = _add_docstr(_special.special_expm1, r""" expm1(input, *, out=None) -> Tensor Computes the exponential of the elements minus 1 of :attr:`input`. .. math:: y_{i} = e^{x_{i}} - 1 .. note:: This function provides greater precision than exp(x) - 1 for small values of x. """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.expm1(torch.tensor([0, math.log(2.)])) tensor([ 0., 1.]) """.format(**common_args)) i0e = _add_docstr(_special.special_i0e, r""" i0e(input, *, out=None) -> Tensor Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below) for each element of :attr:`input`. .. math:: \text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.i0e(torch.arange(5, dtype=torch.float32)) tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070]) """.format(**common_args))
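# The precision note on expm1 above is easy to verify: for tiny x, exp(x)
# rounds to exactly 1.0 in float32 and the subtraction cancels to zero, while
# expm1 keeps the signal. A minimal check (assumes a working torch install):

import torch

x = torch.tensor([1e-10], dtype=torch.float32)
print(torch.exp(x) - 1)        # tensor([0.]) -- catastrophic cancellation
print(torch.special.expm1(x))  # tensor([1.0000e-10]) -- value preserved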
# -*- coding: utf-8 -*- from h import models, security from h.util.db import lru_cache_in_transaction PREFIX = "6879-" class DeveloperTokenService: """A service for retrieving and performing common operations on developer tokens.""" def __init__(self, session): """ Create a new developer token service. :param session: the SQLAlchemy session object """ self.session = session self._cached_fetch = lru_cache_in_transaction(self.session)(self._fetch) def fetch(self, userid): """ Fetch a developer token by its userid. :param userid: The userid, typically of the currently authenticated user. :type userid: unicode :returns: a token instance, if found :rtype: h.models.Token or None """ return self._cached_fetch(userid) def create(self, userid): """ Creates a developer token for the given userid. :param userid: The userid for which the developer token gets created. :type userid: unicode :returns: a token instance :rtype: h.models.Token """ token = models.Token(userid=userid, value=self._generate_token()) self.session.add(token) return token def regenerate(self, token): """ Regenerates a developer token. The implementation changes the token value in-place, however when calling this method you should not rely on this implementation detail. You should use the return value of this method as the new token object. :param token: The token instance which needs to be regenerated. :type token: h.models.Token :returns: a regenerated token instance :rtype: h.models.Token """ token.value = self._generate_token() return token def _fetch(self, userid): if userid is None: return None return ( self.session.query(models.Token) .filter_by(userid=userid, authclient=None) .order_by(models.Token.created.desc()) .one_or_none() ) def _generate_token(self): return PREFIX + security.token_urlsafe() def developer_token_service_factory(context, request): return DeveloperTokenService(request.db)
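# The developer tokens produced above are just the "6879-" prefix followed by a
# URL-safe random string. A standalone sketch of _generate_token, assuming
# h's security.token_urlsafe behaves like the stdlib secrets.token_urlsafe:

import secrets

PREFIX = "6879-"

def generate_token() -> str:
    # Stand-in for security.token_urlsafe(); assumed equivalent semantics.
    return PREFIX + secrets.token_urlsafe()

token = generate_token()
assert token.startswith(PREFIX)
print(token)  # e.g. 6879-hkbTB2... (random each run)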
import { addParameters } from "@storybook/react"; import "!style-loader!css-loader!sass-loader!./styles.scss"; const KINDS = [ "Foundation/Introduction", "Foundation/Colors", "Foundation/Icons", "Layout/Grid" ]; addParameters({ options: { /** * display the top-level grouping as a "root" in the sidebar * @type {Boolean} */ showRoots: true, } });
const moment = require('moment');
const mongoose = require('mongoose');

const Schema = mongoose.Schema;

const TrackSchema = new Schema({
  name: {type: String, required: true},
  album: {type: Schema.ObjectId, ref: 'Album', required: true},
  rating: {type: Number, required: true, enum: [1, 2, 3, 4, 5], default: 3},
  track_number: {type: Number, max: 3},
});

// Virtual for the track's URL.
TrackSchema
  .virtual('url')
  .get(function () {
    return '/popular/track/' + this._id;
  });

// Virtual for the track's formatted time (defined after the url property).
TrackSchema
  .virtual('track_time')
  .get(function () {
    // Format track_number via moment as hours:minutes:seconds.
    return moment(this.track_number).format('hh:mm:ss');
  });

// Export the model.
module.exports = mongoose.model('Track', TrackSchema);
/* * Copyright 2014 msedova. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ module.exports = function(grunt) { grunt.initConfig({ pkg: grunt.file.readJSON('package.json'), clean: ["build/", "dist/"], uglify: { options: { banner: '/*! <%= pkg.name %> <%= pkg.version %>\n<%= grunt.template.today("yyyy-mm-dd") %> */\n' }, build: { src: 'src/<%= pkg.name %>.js', dest: 'build/<%= pkg.name %>.<%= pkg.version %>.min.js' } }, jsdoc : { main : { src: ['src/*.js', 'test/*.js'], options: { destination: 'doc', configure : "./conf/jsdoc.conf.json", template : "./node_modules/grunt-jsdoc/node_modules/ink-docstrap/template" } } }, // make a zipfile for "download" link compress: { main: { options: { archive: 'dist/Protael.zip' }, files: [ {expand: true, src: ['*'], cwd: 'build', dest: '<%= pkg.name %>/js'}, // minified js {expand: true, src: ['*'], cwd: 'lib', dest: '<%= pkg.name %>/js/vendor'}, // js libraries {expand: true, src: ['protael.css'], cwd: 'css', dest: '<%= pkg.name %>/css', filter: 'isFile'}, // css {src: ['./protaelSeed.html'], dest: '<%= pkg.name %>/'} // template ] } }, copy: { main: { files: [{expand: true, cwd: 'src/', src: '<%= pkg.name %>.js', dest: 'dist/', flatten: true, filter: 'isFile' },{expand: true, cwd: 'build/', src: '*.js', dest: 'dist/', flatten: true, filter: 'isFile' },{expand: true, cwd: 'css/', src: '*.css', dest: 'dist/', flatten: true, filter: 'isFile' } ] } } }); grunt.loadNpmTasks('grunt-contrib-clean'); grunt.loadNpmTasks('grunt-contrib-copy'); grunt.loadNpmTasks('grunt-contrib-uglify'); grunt.loadNpmTasks('grunt-contrib-compress'); grunt.loadNpmTasks('grunt-contrib-concat'); grunt.loadNpmTasks('grunt-jsdoc'); // Default task(s). grunt.registerTask('default', ['clean', 'jsdoc','uglify', 'compress', 'copy']); };
# Copyright 2018 Nordic Semiconductor ASA # Copyright 2017 Linaro Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image signing and management. """ from . import version as versmod from intelhex import IntelHex import hashlib import struct import os.path from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes IMAGE_MAGIC = 0x96f3b83d IMAGE_HEADER_SIZE = 32 BIN_EXT = "bin" INTEL_HEX_EXT = "hex" DEFAULT_MAX_SECTORS = 128 # Image header flags. IMAGE_F = { 'PIC': 0x0000001, 'NON_BOOTABLE': 0x0000010, 'ENCRYPTED': 0x0000004, } TLV_VALUES = { 'KEYHASH': 0x01, 'SHA256': 0x10, 'RSA2048': 0x20, 'ECDSA224': 0x21, 'ECDSA256': 0x22, 'ENCRSA2048': 0x30, 'ENCKW128': 0x31, 'RSA2048_TRAIL': 0x40, 'ECDSA224_TRAIL': 0x41, 'ECDSA256_TRAIL': 0x42, 'ENCRSA2048_TRAIL': 0x50, 'ENCKW128_TRAIL': 0x51, 'RSA2048_PUBKEY': 0x60, 'ECDSA224_PUBKEY': 0x61, 'ECDSA256_PUBKEY': 0x62, 'ENCRSA2048_PUBKEY': 0x70, 'ENCKW128_PUBKEY': 0x71, 'PUBLICKEY': 0x80, 'EXTFILE_HASH' : 0x81, } TLV_INFO_SIZE = 4 TLV_INFO_MAGIC = 0x6907 boot_magic = bytes([ 0x77, 0xc2, 0x95, 0xf3, 0x60, 0xd2, 0xef, 0x7f, 0x35, 0x52, 0x50, 0x0f, 0x2c, 0xb6, 0x79, 0x80, ]) STRUCT_ENDIAN_DICT = { 'little': '<', 'big': '>' } class TLV(): def __init__(self, endian): self.buf = bytearray() self.endian = endian def add(self, kind, payload): """Add a TLV record. 
        Kind should be a string found in TLV_VALUES above."""
        e = STRUCT_ENDIAN_DICT[self.endian]
        buf = struct.pack(e + 'BBH', TLV_VALUES[kind], 0, len(payload))
        self.buf += buf
        self.buf += payload

    def get(self):
        e = STRUCT_ENDIAN_DICT[self.endian]
        header = struct.pack(e + 'HH', TLV_INFO_MAGIC, TLV_INFO_SIZE + len(self.buf))
        return header + bytes(self.buf)

    def get_with_sign(self, key):
        sha = hashlib.sha256()
        sha.update(self.buf)
        trail_hash = sha.digest()
        sig = key.sign(bytes(trail_hash))
        self.add(key.sig_tlv() + "_TRAIL", sig)
        return self.get()


class Image():
    def __init__(self, version=None, header_size=IMAGE_HEADER_SIZE,
                 pad_header=False, pad=False, align=1, slot_size=0,
                 max_sectors=DEFAULT_MAX_SECTORS, overwrite_only=False,
                 endian="little", hashtrail=False):
        self.version = version or versmod.decode_version("0")
        self.header_size = header_size
        self.pad_header = pad_header
        self.pad = pad
        self.align = align
        self.slot_size = slot_size
        self.max_sectors = max_sectors
        self.overwrite_only = overwrite_only
        self.endian = endian
        self.hashtrail = hashtrail
        self.base_addr = None
        self.payload = []

    def __repr__(self):
        # The original format string had one fewer placeholder than argument
        # (hashtrail was missing), which made the 0x{:x} field receive the
        # class name and raise at runtime; the hashtrail field is added here.
        return "<Image version={}, header_size={}, base_addr={}, \
align={}, slot_size={}, max_sectors={}, overwrite_only={}, \
endian={}, hashtrail={}, format={}, payloadlen=0x{:x}>".format(
            self.version,
            self.header_size,
            self.base_addr if self.base_addr is not None else "N/A",
            self.align,
            self.slot_size,
            self.max_sectors,
            self.overwrite_only,
            self.endian,
            self.hashtrail,
            self.__class__.__name__,
            len(self.payload))

    def load(self, path):
        """Load an image from a given file"""
        ext = os.path.splitext(path)[1][1:].lower()
        if ext == INTEL_HEX_EXT:
            ih = IntelHex(path)
            self.payload = ih.tobinarray()
            self.base_addr = ih.minaddr()
        else:
            with open(path, 'rb') as f:
                self.payload = f.read()

        # Add the image header if needed.
        if self.pad_header and self.header_size > 0:
            if self.base_addr:
                # Adjust base_addr for new header
                self.base_addr -= self.header_size
            self.payload = (b'\000' * self.header_size) + self.payload

        self.check()

    def save(self, path):
        """Save the image to a given file"""
        if self.pad:
            self.pad_to(self.slot_size)

        ext = os.path.splitext(path)[1][1:].lower()
        if ext == INTEL_HEX_EXT:
            # input was in binary format, but HEX needs to know the base addr
            if self.base_addr is None:
                raise Exception("Input file does not provide a base address")
            h = IntelHex()
            h.frombytes(bytes=self.payload, offset=self.base_addr)
            h.tofile(path, 'hex')
        else:
            with open(path, 'wb') as f:
                f.write(self.payload)

    def check(self):
        """Perform some sanity checking of the image."""
        # If there is a header requested, make sure that the image
        # starts with all zeros.
        if self.header_size > 0:
            if any(v != 0 for v in self.payload[0:self.header_size]):
                raise Exception("Padding requested, but image does not start with zeros")
        if not self.pad and self.slot_size > 0:
            # no padding is necessary
            tsize = self._trailer_size(self.align, self.max_sectors,
                                       self.overwrite_only)
            padding = self.slot_size - (len(self.payload) + tsize)
            if padding < 0:
                msg = "Image size (0x{:x}) + trailer (0x{:x}) exceeds requested size 0x{:x}".format(
                    len(self.payload), tsize, self.slot_size)
                raise Exception(msg)

    def create(self, key, enckey, key2, extFile):
        self.add_header(enckey)

        tlv = TLV(self.endian)

        if key2 is not None and key is not None:
            pub = key.get_public_bytes()
            tlv.add('PUBLICKEY', pub)
            sha = hashlib.sha256()
            sha.update(pub)
            pubbytes = sha.digest()
            sig2 = key2.sign(bytes(pubbytes))
            tlv.add(key2.sig_tlv() + "_PUBKEY", sig2)

        # Note that ecdsa wants to do the hashing itself, which means
        # we get to hash it twice.
        sha = hashlib.sha256()
        sha.update(self.payload)
        digest = sha.digest()
        tlv.add('SHA256', digest)

        if enckey is not None:
            plainkey = os.urandom(16)
            cipherkey = enckey._get_public().encrypt(
                plainkey, padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(),
                    label=None))
            tlv.add('ENCRSA2048', cipherkey)

            nonce = bytes([0] * 16)
            cipher = Cipher(algorithms.AES(plainkey), modes.CTR(nonce),
                            backend=default_backend())
            encryptor = cipher.encryptor()
            img = bytes(self.payload[self.header_size:])
            self.payload[self.header_size:] = encryptor.update(img) + \
                encryptor.finalize()

        if extFile is not None:
            # Hash the external file; a context manager closes the handle
            # even if hashing fails (the original leaked the descriptor).
            with open(extFile, 'rb') as f:
                sha = hashlib.sha256()
                sha.update(f.read())
                digest = sha.digest()
            tlv.add('EXTFILE_HASH', digest)

        ########## must be LAST TLV Value #############
        if not self.hashtrail:
            if key is not None:
                pub = key.get_public_bytes()
                sha = hashlib.sha256()
                sha.update(pub)
                pubbytes = sha.digest()
                tlv.add('KEYHASH', pubbytes)
                sig = key.sign(bytes(self.payload))
                tlv.add(key.sig_tlv(), sig)
            self.payload += tlv.get()
        else:
            self.payload += tlv.get_with_sign(key)

    def add_header(self, enckey):
        """Install the image header."""
        flags = 0
        if enckey is not None:
            flags |= IMAGE_F['ENCRYPTED']

        e = STRUCT_ENDIAN_DICT[self.endian]
        fmt = (e +
               # type ImageHdr struct {
               'I' +     # Magic    uint32
               'I' +     # LoadAddr uint32
               'H' +     # HdrSz    uint16
               'H' +     # Pad1     uint16
               'I' +     # ImgSz    uint32
               'I' +     # Flags    uint32
               'BBHI' +  # Vers     ImageVersion
               'I'       # Pad2     uint32
               )  # }
        assert struct.calcsize(fmt) == IMAGE_HEADER_SIZE
        header = struct.pack(fmt,
                IMAGE_MAGIC,
                0,  # LoadAddr
                self.header_size,
                0,  # Pad1
                len(self.payload) - self.header_size,  # ImageSz
                flags,  # Flags
                self.version.major,
                self.version.minor or 0,
                self.version.revision or 0,
                self.version.build or 0,
                0)  # Pad2
        self.payload = bytearray(self.payload)
        self.payload[:len(header)] = header

    def _trailer_size(self, write_size, max_sectors, overwrite_only):
        # NOTE: should already be checked by the argument parser
        if overwrite_only:
            return 8 * 2 + 16
        else:
            if write_size not in set([1, 2, 4, 8]):
                raise Exception("Invalid alignment: {}".format(write_size))
            m = DEFAULT_MAX_SECTORS if max_sectors is None else max_sectors
            return m * 3 * write_size + 8 * 2 + 16

    def pad_to(self, size):
        """Pad the image to the given size, with the given flash alignment."""
        tsize = self._trailer_size(self.align, self.max_sectors,
                                   self.overwrite_only)
        padding = size - (len(self.payload) + tsize)
        pbytes = b'\xff' * padding
        pbytes += b'\xff' * (tsize - len(boot_magic))
        pbytes += boot_magic
        self.payload += pbytes
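The TLV area that create() appends has a simple layout: a four-byte info header of magic and total length (packed as e + 'HH'), followed by records packed as type, pad, length (e + 'BBH') and then the raw payload. Below is a minimal, hedged sketch of a reader for that layout, written against the code above; it is an illustration, not part of imgtool, and it does not verify any signatures.

import struct

TLV_INFO_MAGIC = 0x6907  # mirrors the constant defined above

def parse_tlv_area(data, endian='<'):
    """Yield (type, payload) pairs from a TLV area built by TLV.get()."""
    magic, total_len = struct.unpack_from(endian + 'HH', data, 0)
    if magic != TLV_INFO_MAGIC:
        raise ValueError("not a TLV area")
    offset = 4  # TLV_INFO_SIZE: skip the info header
    while offset < total_len:
        tlv_type, _pad, tlv_len = struct.unpack_from(endian + 'BBH', data, offset)
        offset += 4  # size of the 'BBH' record header
        yield tlv_type, bytes(data[offset:offset + tlv_len])
        offset += tlv_len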
"use strict"; /* eslint-env jquery */ jQuery(document).ready(function ($) { /** * Force the superfish menu to align top instead of drop down */ $('#menu .menu li > ul').each(function () { var position = $(this).parent().position(); $(this).css('top', 4 - position.top); }); /** * Trigger an initial resize */ $(window).resize(); }); //# sourceMappingURL=main.js.map
import logging

log_handler = logging.getLogger(__name__)

_DEFAULT_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
_SHARED = None


class Logger:

    @staticmethod
    def debug(message):
        # Emit at DEBUG level; the original called log_handler.info() here,
        # so debug messages were mislabeled as INFO.
        log_handler.debug(message)


def configure_logging():
    log_handler.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    formatter = logging.Formatter(_DEFAULT_LOG_FORMAT, datefmt='%H:%M:%S')
    ch.setFormatter(formatter)
    log_handler.addHandler(ch)
#include "communication.h" #include "defs.h" #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <stdio.h> #include <string.h> int replyToClient_error(unsigned int pid, int error_status) { char fifo_name[WIDTH_FIFO_NAME + 1]; snprintf(fifo_name, WIDTH_FIFO_NAME+1, CLIENT_ANSWER_PREFIX "%0" MACRO_STRINGIFY(WIDTH_PID) "u", pid); int fifo_fd = open(fifo_name, O_WRONLY); if(fifo_fd == -1) { //Error opening answer fifo, client probably timed out fprintf(stderr, "Client %u probably timed out\n", pid); return -1; } char * msg = NULL; if(asprintf(&msg, "%d\n", error_status) == -1) { fprintf(stderr, "Error in asprintf for replying to client!\n"); return -2; } if(write(fifo_fd, msg, strlen(msg)) < 0) { fprintf(stderr, "Error in write, in replying to client with id %u\n", pid); return -3; } close(fifo_fd); free(msg); return 0; } int replyToClient_success(unsigned int pid, int n_reserved_seats, unsigned int reserved_seats[]) { if(n_reserved_seats <= 0) { return -1; } char fifo_name[WIDTH_FIFO_NAME + 1]; snprintf(fifo_name, WIDTH_FIFO_NAME+1, CLIENT_ANSWER_PREFIX "%0" MACRO_STRINGIFY(WIDTH_PID) "u", pid); int fifo_fd = open(fifo_name, O_WRONLY); if(fifo_fd == -1) { //Error opening answer fifo, client probably timed out fprintf(stderr, "Client %u probably timed out\n", pid); return -2; } char msg[4096]; msg[0] = '\0'; char single_num[WIDTH_SEAT+1]; snprintf(single_num, WIDTH_SEAT+1, "%u", reserved_seats[0]); strncat(msg, single_num, WIDTH_SEAT+1); int i; for(i = 1; i < n_reserved_seats; ++i) { strcat(msg, " "); snprintf(single_num, WIDTH_SEAT+1, "%u", reserved_seats[i]); strncat(msg, single_num, WIDTH_SEAT+1); } if(write(fifo_fd, msg, strlen(msg)) < 0) { fprintf(stderr, "Error in write, in replying to client with id %u\n", pid); return -3; } close(fifo_fd); return 0; }
import pandas as pd
import hillmaker as hm

file_stopdata = '../data/ShortStay2.csv'

# Required inputs
scenario = 'ShortStay2_PatTypeSeverity'
in_fld_name = 'InRoomTS'
out_fld_name = 'OutRoomTS'
cat_fld_name = ['PatType', 'Severity']
start = '1/1/1996'
end = '3/30/1996 23:45'

# Optional inputs
bin_mins = 60
totals = 2
verbose = 1
nonstationary_stats = True  # referenced below but never defined in the original
stationary_stats = True     # referenced below but never defined in the original

stops_df = pd.read_csv(file_stopdata, parse_dates=[in_fld_name, out_fld_name])

hills = hm.make_hills(scenario, stops_df, in_fld_name, out_fld_name,
                      start, end, cat_fld_name,
                      bin_mins,
                      cat_to_exclude=None,
                      nonstationary_stats=nonstationary_stats,
                      stationary_stats=stationary_stats,
                      totals=totals,
                      export_bydatetime_csv=True,
                      export_summaries_csv=True,
                      export_path='./output',
                      verbose=verbose)
from setuptools import setup import sys def get_readme_md_contents(): """read the contents of your README file""" with open("README.md") as f: long_description = f.read() return long_description install_reqs = ["decorator>=3.3.2", "requests>=2.6.0"] if [sys.version_info[0], sys.version_info[1]] < [2, 7]: install_reqs.append("argparse>=1.2") setup( name="datadog", version="0.28.0", install_requires=install_reqs, tests_require=["nose", "mock"], packages=["datadog", "datadog.api", "datadog.dogstatsd", "datadog.threadstats", "datadog.util", "datadog.dogshell"], author="Datadog, Inc.", long_description=get_readme_md_contents(), long_description_content_type="text/markdown", author_email="dev@datadoghq.com", description="The Datadog Python library", license="BSD", keywords="datadog", url="https://www.datadoghq.com", entry_points={"console_scripts": ["dog = datadog.dogshell:main", "dogwrap = datadog.dogshell.wrap:main"]}, test_suite="tests", classifiers=[ "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: PyPy", ], )
import logging __version__ = "0.19.0rc29" version = tuple(__version__.split('.')) logging.getLogger(__name__).addHandler(logging.NullHandler())
from django.urls import path from flights import views app_name = 'flights' urlpatterns = [ path('',views.home, name = 'home'), path('login',views.login, name = 'login'), path('home/<str:name>',views.home1, name = 'home1'), path('forum/<str:name>',views.forum, name = 'forum'), path('notes/<str:name>',views.notes, name = 'notes'), path('course/<str:foo>',views.course, name = 'course'), path('userlogin',views.userlogin, name = 'userlogin'), path('usersignin',views.usersignin, name = 'usersignin'), path('teacherlogin',views.teacherlogin, name = 'teacherlogin'), path('teachersignin',views.teachersignin, name = 'teachersignin'), path('course',views.course, name = 'flights'), path('download',views.download, name = 'download'), path('upcoming/<str:name>/<str:sub>',views.upcoming, name = 'upcoming'), path('popular/<str:name>',views.popular, name = 'popular'), path('thisweek/<str:name>',views.thisweek, name = 'thisweek'), path('profile/<str:foo>',views.profile, name = 'profile'), path('live/<str:name>/<str:sub>',views.live, name = 'live'), path('select',views.select, name = 'select'), path('select1',views.select1, name = 'select1'), ]
/* ************************************ */
/* Define helper functions */
/* ************************************ */
ITIs = [0,0.136,0.136,0.612]
var get_ITI = function() {
  return 4500 + ITIs.shift()*1000
}

var randomDraw = function(lst) {
  var index = Math.floor(Math.random() * (lst.length))
  return lst[index]
}

/* ************************************ */
/* Define experimental variables */
/* ************************************ */
// task specific variables
var choices = [37, 40]
var bonus_list = [] //keeps track of choices for bonus
//hard coded options
var options = {
  small_amt: [20,20,20,20],
  large_amt: [50, 100, 100, 30],
  later_del: [40, 40, 80, 20]
}

var stim_html = []
//loop through each option to create html
for (var i = 0; i < options.small_amt.length; i++) {
  stim_html[i] = '<div class = dd-stim><div class = amtbox style = "color:white">'+options.large_amt[i]+'\u20AC</div><br><br>'+
    '<div class = delbox style = "color:white">'+ options.later_del[i]+' days</div></div>'
}

data_prop = []
for (var i = 0; i < options.small_amt.length; i++) {
  data_prop.push({
    small_amount: options.small_amt[i],
    large_amount: options.large_amt[i],
    later_delay: options.later_del[i]
  });
}

trials = []
//used new features to include the stimulus properties in recorded data
for (var i = 0; i < stim_html.length; i++) {
  trials.push({
    stimulus: stim_html[i],
    data: data_prop[i]
  });
}

/* ************************************ */
/* Set up jsPsych blocks */
/* ************************************ */
var instructions_block = {
  type: 'poldrack-single-stim',
  stimulus: '<div class = centerbox><div class = center-text><font size="+2">Choisissez entre <strong>20\u20AC aujourd\'hui</strong> ou l\'option presentee.<br><strong>Index:</strong> Accepter l\'option a l\'ecran (rejeter 20\u20AC aujourd\'hui).<br><strong>Majeur:</strong> Rejeter l\'option a l\'ecran (accepter 20\u20AC today)</font></div></div>',
  is_html: true,
  timing_stim: -1,
  timing_response: -1,
  response_ends_trial: true,
  choices: [32],
  data: {
    trial_id: "instructions",
  },
  timing_post_trial: 500
};

var start_test_block = {
  type: 'poldrack-single-stim',
  stimulus: '<div class = centerbox><div class = center-text>Preparez-vous</div></div>',
  is_html: true,
  choices: 'none',
  timing_stim: 1500,
  timing_response: 1500,
  data: {
    trial_id: "test_start_block"
  },
  timing_post_trial: 500,
  on_finish: function() {
    current_trial = 0
    exp_stage = 'test'
  }
};

var end_block = {
  type: 'poldrack-single-stim',
  stimulus: '<div class = centerbox><div class = center-text><i>Fin</i></div></div>',
  is_html: true,
  choices: [32],
  timing_response: -1,
  response_ends_trial: true,
  data: {
    trial_id: "end",
    exp_id: 'discount_fixed'
  },
  timing_post_trial: 0,
  on_finish: function() {
    var bonus = randomDraw(bonus_list)
    jsPsych.data.addDataToLastTrial({'bonus': bonus})
  }
};

//Set up experiment
var discount_fixed_experiment = []
discount_fixed_experiment.push(instructions_block);
discount_fixed_experiment.push(start_test_block);
for (var i = 0; i < options.small_amt.length; i++) {
  var test_block = {
    type: 'poldrack-single-stim',
    // The original object literal defined `data` twice, so the first entry
    // (trial_id/exp_stage) was silently discarded; the two are merged here.
    data: Object.assign({
      trial_id: "stim",
      exp_stage: "test"
    }, trials[i].data),
    stimulus: trials[i].stimulus,
    timing_stim: 4000,
    timing_response: get_ITI,
    is_html: true,
    choices: choices,
    response_ends_trial: false,
    timing_post_trial: 0,
    on_finish: function(data) {
      var choice = false;
      if (data.key_press == 37) {
        choice = 'larger_later';
        bonus_list.push({'amount': data.large_amount, 'delay': data.later_delay})
      } else if (data.key_press == 40) {
        choice = 'smaller_sooner';
        bonus_list.push({'amount': data.small_amount, 'delay': 0})
      }
      jsPsych.data.addDataToLastTrial({
        choice: choice
      });
    }
  };
  discount_fixed_experiment.push(test_block)
}
discount_fixed_experiment.push(end_block);
(function() { 'use strict'; var toFixed = fabric.util.toFixed; /** * Pattern class * @class fabric.Pattern * @see {@link http://fabricjs.com/patterns|Pattern demo} * @see {@link http://fabricjs.com/dynamic-patterns|DynamicPattern demo} * @see {@link fabric.Pattern#initialize} for constructor definition */ fabric.Pattern = fabric.util.createClass(/** @lends fabric.Pattern.prototype */ { /** * Repeat property of a pattern (one of repeat, repeat-x, repeat-y or no-repeat) * @type String * @default */ repeat: 'repeat', /** * Pattern horizontal offset from object's left/top corner * @type Number * @default */ offsetX: 0, /** * Pattern vertical offset from object's left/top corner * @type Number * @default */ offsetY: 0, /** * Constructor * @param {Object} [options] Options object * @param {Function} [callback] function to invoke after callback init. * @return {fabric.Pattern} thisArg */ initialize: function(options, callback) { options || (options = { }); this.id = fabric.Object.__uid++; this.setOptions(options); if (!options.source || (options.source && typeof options.source !== 'string')) { callback && callback(this); return; } // function string if (typeof fabric.util.getFunctionBody(options.source) !== 'undefined') { this.source = new Function(fabric.util.getFunctionBody(options.source)); callback && callback(this); } else { // img src string var _this = this; this.source = fabric.util.createImage(); fabric.util.loadImage(options.source, function(img) { _this.source = img; callback && callback(_this); }); } }, /** * Returns object representation of a pattern * @param {Array} [propertiesToInclude] Any properties that you might want to additionally include in the output * @return {Object} Object representation of a pattern instance */ toObject: function(propertiesToInclude) { var NUM_FRACTION_DIGITS = fabric.Object.NUM_FRACTION_DIGITS, source, object; // callback if (typeof this.source === 'function') { source = String(this.source); } // <img> element else if (typeof this.source.src === 'string') { source = this.source.src; } // <canvas> element else if (typeof this.source === 'object' && this.source.toDataURL) { source = this.source.toDataURL(); } object = { type: 'pattern', source: source, repeat: this.repeat, offsetX: toFixed(this.offsetX, NUM_FRACTION_DIGITS), offsetY: toFixed(this.offsetY, NUM_FRACTION_DIGITS), }; fabric.util.populateWithProperties(this, object, propertiesToInclude); return object; }, /* _TO_SVG_START_ */ /** * Returns SVG representation of a pattern * @param {fabric.Object} object * @return {String} SVG representation of a pattern */ toSVG: function(object) { var patternSource = typeof this.source === 'function' ? 
this.source() : this.source, patternWidth = patternSource.width / object.width, patternHeight = patternSource.height / object.height, patternOffsetX = this.offsetX / object.width, patternOffsetY = this.offsetY / object.height, patternImgSrc = ''; if (this.repeat === 'repeat-x' || this.repeat === 'no-repeat') { patternHeight = 1; } if (this.repeat === 'repeat-y' || this.repeat === 'no-repeat') { patternWidth = 1; } if (patternSource.src) { patternImgSrc = patternSource.src; } else if (patternSource.toDataURL) { patternImgSrc = patternSource.toDataURL(); } return '<pattern id="SVGID_' + this.id + '" x="' + patternOffsetX + '" y="' + patternOffsetY + '" width="' + patternWidth + '" height="' + patternHeight + '">\n' + '<image x="0" y="0"' + ' width="' + patternSource.width + '" height="' + patternSource.height + '" xlink:href="' + patternImgSrc + '"></image>\n' + '</pattern>\n'; }, /* _TO_SVG_END_ */ setOptions: function(options) { for (var prop in options) { this[prop] = options[prop]; } }, /** * Returns an instance of CanvasPattern * @param {CanvasRenderingContext2D} ctx Context to create pattern * @return {CanvasPattern} */ toLive: function(ctx) { var source = typeof this.source === 'function' ? this.source() : this.source; // if the image failed to load, return, and allow rest to continue loading if (!source) { return ''; } // if an image if (typeof source.src !== 'undefined') { if (!source.complete) { return ''; } if (source.naturalWidth === 0 || source.naturalHeight === 0) { return ''; } } return ctx.createPattern(source, this.repeat); } }); })();
var Dispatcher = require('flux').Dispatcher; module.exports = new Dispatcher();
from flask import request, jsonify, session
from tqdm import tqdm
import traceback, json, time, os, datetime, threading, pywfom
import numpy as np

from . import api
from .. import models
from ...devices.arduino import Arduino
from ...devices.camera import Camera

DEFAULT_FILE = {
    "directory": os.environ['PYWFOM_DIR'] if 'PYWFOM_DIR' in os.environ else None,
    "number_of_runs": "",
    "run_length": "",
    "run_length_unit": "sec"
}

# ****** Create Controllable System ********

class _SystemException(Exception):
    pass

class _System(object):
    """docstring for _System."""

    def __init__(self):
        self.arduino = Arduino()
        self.cameras = []
        self.file = DEFAULT_FILE
        self.acquiring = False
        self.username = None
        self.mouse = None
        self.write_speed = 0
        self.primary_framerate = 0

    def benchmark_disk(self):
        pass

    def set_from_file(self, path):
        # Clear existing settings
        self.delete()
        # Start system from specified path, otherwise ignore
        with open(path, 'r') as f:
            settings = json.load(f)
        self.post(None, settings)

    def set_from_user_default(self, user, pwd):
        # Clear existing settings
        self.delete()
        self.username = user
        # Retrieve settings from MongoDB
        default = models.User.objects(username=user, password=pwd).get().default
        # Post the settings
        self.post(id=None, settings=json.loads(default.to_json()))

    def get(self, setting=None):
        resp = {
            "file": self.file,
            "cameras": [cam.json() for cam in self.cameras],
            "arduino": self.arduino.json() if self.arduino else {},
            "username": self.username,
            "mouse": self.mouse
        }
        if not setting:
            return resp
        elif setting in resp:
            return resp[setting]
        else:
            return self.cameras[int(setting)].json()

    def delete(self, id=None):
        if id is None:
            self.file = {}
            _ = self.arduino.close() if self.arduino else None
            self.arduino = None
            [cam.close() for cam in self.cameras]
            self.cameras = []
        elif id == 'file':
            self.file = {}
        elif id == 'arduino':
            _ = self.arduino.close() if self.arduino else None
            self.arduino = None
        elif id == 'cameras':
            [cam.close() for cam in self.cameras]
            self.cameras = []
        else:
            cam = self.cameras.pop(int(id))
            cam.close()
        return f"Successfully deleted {id}", 200

    def put(self, id=None, settings={}):
        if id == 'file':
            self.file = settings  # the original used '==', comparing instead of assigning
        elif id == 'arduino':
            if not self.arduino:
                return "Arduino is not initialized", 400
            else:
                self.arduino.set(**settings)
        elif id == 'mouse':
            self.mouse = settings
        else:
            self.cameras[int(id)].set(**settings)
        return self.get(id)

    def post(self, id=None, settings={}):
        if id == 'file':
            self.file = settings
        elif id == 'cameras':
            _newcam = Camera(**settings)
            self.cameras.append(_newcam)
            return _newcam.json()
        elif id == 'arduino':
            if self.arduino:
                return "Cannot POST to Initialized Arduino", 400
            else:
                self.arduino = Arduino(**settings)
        elif id is None:
            self.file = settings['file']
            self.cameras = [Camera(**config) for config in settings['cameras']]
            self.arduino = Arduino(**settings['arduino'])
        else:
            setattr(self, id, settings)
        return self.get(id)

    def stop_acquisition(self):
        self.acquiring = False

    def check_acquisition_settings(self):
        if self.acquiring:
            return ["All Good"]
        else:
            errors = []
            # Check run settings
            for key in ['run_length', 'run_length_unit', 'number_of_runs', 'directory']:
                if not self.file[key]:
                    errors.append(f"{key} is missing from file settings")
            # CAMERA SETTINGS
            _camera_settings = [cam.json() for cam in self.cameras]
            # Check number of cameras
            if len(_camera_settings) == 0:
                errors.append("No cameras have been added")
            # Assert proper number of primary cameras
            _primary_fr = [cam['framerate'] for cam in _camera_settings if cam['primary']]
            if len(_primary_fr) == 0:
                errors.append("You must specify a primary camera")
            elif len(_primary_fr) > 1:
                errors.append("You can only specify one primary camera")
            else:
                self.primary_framerate = _primary_fr[0]
                # The original compared against an undefined name 'fr';
                # comparing against the primary framerate is the closest
                # reading of the intent.
                _over = [cam['framerate'] < self.primary_framerate
                         for cam in _camera_settings if not cam['primary']]
                # TODO: Ensure cameras aren't going over their maximum framerate
            # Check additional data settings
            for key in ['username', 'mouse']:
                if not getattr(self, key):
                    errors.append(f"{key} was not specified")
            return errors

    def start_acquisition(self):
        print("Starting an acquisition")
        path = os.path.join(self.file['directory'],
                            datetime.datetime.now().strftime('%m_%d_%Y_%H%M%S'))
        os.mkdir(path)
        for cam in self.cameras:
            cam.acquiring = True
        for i in tqdm(range(int(self.file['number_of_runs'])), unit="run"):
            run = self._create_run()
            if not run:
                break
            else:
                os.mkdir(f"{path}/run{i}")
                rl, rlu = self.file['run_length'], self.file['run_length_unit']
                num_frames = self.primary_framerate * rl * {"sec": 1, "min": 60, "hr": 3600}[rlu]
                for j in tqdm(range(int(num_frames)), leave=False, unit="frame"):
                    # Place latest frame from each camera in dict
                    frames = {f"{cam.id}": cam.acquired_frames.get() for cam in self.cameras}
                    # Create thread arguments
                    args = (f"{path}/run{i}/frame{j}.npz", frames, run,)
                    # Start a thread to write to file and mongodb
                    threading.Thread(target=self._write_to_file, args=args).start()
                run.save()
        for cam in self.cameras:
            cam.acquiring = False
        return True, []

    def _create_run(self):
        # Check to see if MongoDB keys are valid
        try:
            mouse = models.Mouse.objects(name=self.mouse).get()
            user = models.User.objects(username=self.username).get()
            config = models.Configuration(
                file=self.file,
                arduino=self.arduino.json(),
                cameras=[cam.json() for cam in self.cameras]
            ).save()
            return models.Run(mouse=mouse, user=user, configuration=config,
                              frames=[], timestamp=datetime.datetime.now())
        except Exception as e:
            traceback.print_exc()
            return None

    def _write_to_file(self, fname, frames, run):
        np.savez(fname, **frames)
        frame = models.Frame(file=fname)
        frame.save()
        run.frames.append(frame)

# ****** Initialize System ********
system = _System()

# ************* System Settings API Calls ******************

@api.route('/system/settings', methods=['GET'])
@api.route('/system/settings/<id>', methods=['GET'])
def get_settings(id=None):
    # Retrieve the current settings of the session
    return jsonify(system.get(id))

@api.route('/system/settings', methods=['POST'])
@api.route('/system/settings/<id>', methods=['POST'])
def post_settings(id=None):
    # Add settings to the current session
    return jsonify(system.post(id, request.get_json()))

@api.route('/system/settings/<id>', methods=['PUT'])
def put_settings(id):
    # Adjust settings in the current session
    return jsonify(system.put(id, request.get_json()))

@api.route('/system/settings', methods=["DELETE"])
@api.route('/system/settings/<id>', methods=["DELETE"])
def delete_settings(id=None):
    # Delete settings in the current session
    return system.delete(id)

@api.route('/system/acquisition', methods=["GET"])
def get_acquisition():
    return jsonify(system.check_acquisition_settings())

@api.route('/system/acquisition', methods=["DELETE"])
def stop_acquisition():
    # Actually stop the acquisition; the original returned without doing anything.
    system.stop_acquisition()
    return "Success", 200

@api.route('/system/acquisition', methods=['POST'])
def start_acquisition():
    try:
        system.start_acquisition()
        return "Success", 200
    except Exception as e:
        traceback.print_exc()
        return str(e), 404
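A minimal client sketch for the routes above. It assumes the blueprint is served by Flask's default development server on localhost:5000 and mounted at the root; the host, port, mount point, and the file-settings values are all assumptions, so adjust them to the actual deployment.

import requests

BASE = "http://localhost:5000/system"  # assumed host, port, and mount point

# Read back the full settings document (GET /system/settings).
settings = requests.get(f"{BASE}/settings").json()

# Replace the file settings (POST /system/settings/file).
requests.post(f"{BASE}/settings/file", json={
    "directory": "/tmp/data",  # hypothetical values for illustration
    "number_of_runs": 2,
    "run_length": 30,
    "run_length_unit": "sec",
})

# Validate, then start an acquisition only if no errors came back.
errors = requests.get(f"{BASE}/acquisition").json()
if not errors:
    requests.post(f"{BASE}/acquisition")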
/** * http://www.openjs.com/scripts/events/keyboard_shortcuts/ * Version : 2.01.B * By Binny V A * License : BSD */ shortcut = { 'all_shortcuts': {}, //All the shortcuts are stored in this array 'add': function (shortcut_combination, callback, opt) { //Provide a set of default options var default_options = { 'type': 'keydown', 'propagate': false, 'disable_in_input': true, 'target': document, 'keycode': false } if (!opt) opt = default_options; else { for (var dfo in default_options) { if (typeof opt[dfo] == 'undefined') opt[dfo] = default_options[dfo]; } } var ele = opt.target; if (typeof opt.target == 'string') ele = document.getElementById(opt.target); var ths = this; shortcut_combination = shortcut_combination.toLowerCase(); //The function to be called at keypress var func = function (e) { e = e || window.event; if (opt['disable_in_input']) { //Don't enable shortcut keys in Input, Textarea fields var element; if (e.target) element = e.target; else if (e.srcElement) element = e.srcElement; if (element.nodeType == 3) element = element.parentNode; if (/*element.tagName == 'INPUT' || */element.tagName == 'TEXTAREA') return; } //Find Which key is pressed if (e.keyCode) code = e.keyCode; else if (e.which) code = e.which; var character = String.fromCharCode(code).toLowerCase(); if (code == 188) character = ","; //If the user presses , when the type is onkeydown if (code == 190) character = "."; //If the user presses , when the type is onkeydown var keys = shortcut_combination.split("+"); //Key Pressed - counts the number of valid keypresses - if it is same as the number of keys, the shortcut function is invoked var kp = 0; //Work around for stupid Shift key bug created by using lowercase - as a result the shift+num combination was broken var shift_nums = { "`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&", "8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ":", "'": "\"", ",": "<", ".": ">", "/": "?", "\\": "|" } //Special Keys - and their codes var special_keys = { 'esc': 27, 'escape': 27, 'tab': 9, 'space': 32, 'return': 13, 'enter': 13, 'backspace': 8, 'scrolllock': 145, 'scroll_lock': 145, 'scroll': 145, 'capslock': 20, 'caps_lock': 20, 'caps': 20, 'numlock': 144, 'num_lock': 144, 'num': 144, 'pause': 19, 'break': 19, 'insert': 45, 'home': 36, 'delete': 46, 'end': 35, 'pageup': 33, 'page_up': 33, 'pu': 33, 'pagedown': 34, 'page_down': 34, 'pd': 34, 'left': 37, 'up': 38, 'right': 39, 'down': 40, 'f1': 112, 'f2': 113, 'f3': 114, 'f4': 115, 'f5': 116, 'f6': 117, 'f7': 118, 'f8': 119, 'f9': 120, 'f10': 121, 'f11': 122, 'f12': 123 } var modifiers = { shift: { wanted: false, pressed: false }, ctrl: { wanted: false, pressed: false }, alt: { wanted: false, pressed: false }, meta: { wanted: false, pressed: false} //Meta is Mac specific }; if (e.ctrlKey) modifiers.ctrl.pressed = true; if (e.shiftKey) modifiers.shift.pressed = true; if (e.altKey) modifiers.alt.pressed = true; if (e.metaKey) modifiers.meta.pressed = true; for (var i = 0; k = keys[i], i < keys.length; i++) { //Modifiers if (k == 'ctrl' || k == 'control') { kp++; modifiers.ctrl.wanted = true; } else if (k == 'shift') { kp++; modifiers.shift.wanted = true; } else if (k == 'alt') { kp++; modifiers.alt.wanted = true; } else if (k == 'meta') { kp++; modifiers.meta.wanted = true; } else if (k.length > 1) { //If it is a special key if (special_keys[k] == code) kp++; } else if (opt['keycode']) { if (opt['keycode'] == code) kp++; } else { //The special keys did not match if (character == k) kp++; else { if 
(shift_nums[character] && e.shiftKey) { //Stupid Shift key bug created by using lowercase character = shift_nums[character]; if (character == k) kp++; } } } } if (kp == keys.length && modifiers.ctrl.pressed == modifiers.ctrl.wanted && modifiers.shift.pressed == modifiers.shift.wanted && modifiers.alt.pressed == modifiers.alt.wanted && modifiers.meta.pressed == modifiers.meta.wanted) { callback(e); if (!opt['propagate']) { //Stop the event //e.cancelBubble is supported by IE - this will kill the bubbling process. e.cancelBubble = true; e.returnValue = false; //e.stopPropagation works in Firefox. if (e.stopPropagation) { e.stopPropagation(); e.preventDefault(); } return false; } } } this.all_shortcuts[shortcut_combination] = { 'callback': func, 'target': ele, 'event': opt['type'] }; //Attach the function with the event if (ele.addEventListener) ele.addEventListener(opt['type'], func, false); else if (ele.attachEvent) ele.attachEvent('on' + opt['type'], func); else ele['on' + opt['type']] = func; }, //Remove the shortcut - just specify the shortcut and I will remove the binding 'remove': function (shortcut_combination) { shortcut_combination = shortcut_combination.toLowerCase(); var binding = this.all_shortcuts[shortcut_combination]; delete (this.all_shortcuts[shortcut_combination]) if (!binding) return; var type = binding['event']; var ele = binding['target']; var callback = binding['callback']; if (ele.detachEvent) ele.detachEvent('on' + type, callback); else if (ele.removeEventListener) ele.removeEventListener(type, callback, false); else ele['on' + type] = false; } }
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from gslib.help_provider import HELP_NAME from gslib.help_provider import HELP_NAME_ALIASES from gslib.help_provider import HELP_ONE_LINE_SUMMARY from gslib.help_provider import HelpProvider from gslib.help_provider import HELP_TEXT from gslib.help_provider import HelpType from gslib.help_provider import HELP_TYPE _detailed_help_text = (""" <B>DESCRIPTION</B> gsutil supports URI wildcards. For example, the command: gsutil cp gs://bucket/data/abc* . will copy all objects that start with gs://bucket/data/abc followed by any number of characters within that subdirectory. <B>DIRECTORY BY DIRECTORY VS RECURSIVE WILDCARDS</B> The "*" wildcard only matches up to the end of a path within a subdirectory. For example, if bucket contains objects named gs://bucket/data/abcd, gs://bucket/data/abcdef, and gs://bucket/data/abcxyx, as well as an object in a sub-directory (gs://bucket/data/abc/def) the above gsutil cp command would match the first 3 object names but not the last one. If you want matches to span directory boundaries, use a '**' wildcard: gsutil cp gs://bucket/data/abc** . will match all four objects above. Note that gsutil supports the same wildcards for both objects and file names. Thus, for example: gsutil cp data/abc* gs://bucket will match all names in the local file system. Most command shells also support wildcarding, so if you run the above command probably your shell is expanding the matches before running gsutil. However, most shells do not support recursive wildcards ('**'), and you can cause gsutil's wildcarding support to work for such shells by single-quoting the arguments so they don't get interpreted by the shell before being passed to gsutil: gsutil cp 'data/abc**' gs://bucket <B>BUCKET WILDCARDS</B> You can specify wildcards for bucket names. For example: gsutil ls gs://data*.example.com will list the contents of all buckets whose name starts with "data" and ends with ".example.com". You can also combine bucket and object name wildcards. For example this command will remove all ".txt" files in any of your Google Cloud Storage buckets: gsutil rm gs://*/**.txt <B>OTHER WILDCARD CHARACTERS</B> In addition to '*', you can use these wildcards: ? Matches a single character. For example "gs://bucket/??.txt" only matches objects with two characters followed by .txt. [chars] Match any of the specified characters. For example "gs://bucket/[aeiou].txt" matches objects that contain a single vowel character followed by .txt [char range] Match any of the range of characters. For example "gs://bucket/[a-m].txt" matches objects that contain letters a, b, c, ... or m, and end with .txt. 
You can combine wildcards to provide more powerful matches, for example: gs://bucket/[a-m]??.j*g <B>EFFICIENCY CONSIDERATION: USING WILDCARDS OVER MANY OBJECTS</B> It is more efficient, faster, and less network traffic-intensive to use wildcards that have a non-wildcard object-name prefix, like: gs://bucket/abc*.txt than it is to use wildcards as the first part of the object name, like: gs://bucket/*abc.txt This is because the request for "gs://bucket/abc*.txt" asks the server to send back the subset of results whose object names start with "abc", and then gsutil filters the result list for objects whose name ends with ".txt". In contrast, "gs://bucket/*abc.txt" asks the server for the complete list of objects in the bucket and then filters for those objects whose name ends with "abc.txt". This efficiency consideration becomes increasingly noticeable when you use buckets containing thousands or more objects. It is sometimes possible to set up the names of your objects to fit with expected wildcard matching patterns, to take advantage of the efficiency of doing server-side prefix requests. See, for example "gsutil help prod" for a concrete use case example. <B>EFFICIENCY CONSIDERATION: USING MID-PATH WILDCARDS</B> Suppose you have a bucket with these objects: gs://bucket/obj1 gs://bucket/obj2 gs://bucket/obj3 gs://bucket/obj4 gs://bucket/dir1/obj5 gs://bucket/dir2/obj6 If you run the command: gsutil ls gs://bucket/*/obj5 gsutil will perform a /-delimited top-level bucket listing and then one bucket listing for each subdirectory, for a total of 3 bucket listings: GET /bucket/?delimiter=/ GET /bucket/?prefix=dir1/obj5&delimiter=/ GET /bucket/?prefix=dir2/obj5&delimiter=/ The more bucket listings your wildcard requires, the slower and more expensive it will be. The number of bucket listings required grows as: - the number of wildcard components (e.g., "gs://bucket/a??b/c*/*/d" has 3 wildcard components); - the number of subdirectories that match each component; and - the number of results (pagination is implemented using one GET request per 1000 results, specifying markers for each). If you want to use a mid-path wildcard, you might try instead using a recursive wildcard, for example: gsutil ls gs://bucket/**/obj5 This will match more objects than "gs://bucket/*/obj5" (since it spans directories), but is implemented using a delimiter-less bucket listing request (which means fewer bucket requests, though it will list the entire bucket and filter locally, so that could require a non-trivial amount of network traffic). """) class CommandOptions(HelpProvider): """Additional help about wildcards.""" help_spec = { # Name of command or auxiliary help info for which this help applies. HELP_NAME : 'wildcards', # List of help name aliases. HELP_NAME_ALIASES : ['wildcard', '*', '**'], # Type of help: HELP_TYPE : HelpType.ADDITIONAL_HELP, # One line summary of this help. HELP_ONE_LINE_SUMMARY : 'Wildcard Names', # The full help text. HELP_TEXT : _detailed_help_text, }
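The directory-boundary behavior described above can be made concrete with a rough regex translation: '*' and '?' stop at '/', while '**' crosses it. The sketch below illustrates the documented semantics only; it is not gsutil's actual implementation.

import re

def wildcard_to_regex(pattern):
    """Translate a gsutil-style wildcard into a regex (illustrative only)."""
    out, i = [], 0
    while i < len(pattern):
        if pattern[i:i + 2] == '**':
            out.append('.*')        # recursive: crosses '/' boundaries
            i += 2
        elif pattern[i] == '*':
            out.append('[^/]*')     # stops at the next '/'
            i += 1
        elif pattern[i] == '?':
            out.append('[^/]')      # any single character except '/'
            i += 1
        elif pattern[i] == '[':
            j = pattern.index(']', i)
            out.append(pattern[i:j + 1])  # character classes pass through
            i = j + 1
        else:
            out.append(re.escape(pattern[i]))
            i += 1
    return re.compile(''.join(out) + r'$')

names = ['data/abcd', 'data/abcdef', 'data/abcxyx', 'data/abc/def']
print([n for n in names if wildcard_to_regex('data/abc*').match(n)])   # first three
print([n for n in names if wildcard_to_regex('data/abc**').match(n)])  # all four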
import React from 'react'; import { graphql } from 'gatsby'; import Seo from '../../components/seo'; import BookSummaries from '../../components/book-summaries'; const BooksPage = ({ data: { allMdx: { nodes }, }, }) => { return ( <> <h1>Book Notes</h1> <Seo title="Book Notes" slug="books" description="My summaries and notes about some of the books I've read." /> <BookSummaries books={nodes} /> </> ); }; export default BooksPage; export const pageQuery = graphql` { allMdx( limit: 2000 sort: { fields: [frontmatter___date], order: DESC } filter: { fields: { collection: { eq: "books" } } } ) { nodes { frontmatter { title tags emoji date(formatString: "DD MMMM YYYY") bookInfo { title author coverImage { childImageSharp { fixed(width: 96) { ...GatsbyImageSharpFixed } } } } } slug } } } `;
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. cr.define('print_preview.ticket_items', function() { 'use strict'; /** * Media size ticket item. * @param {!print_preview.AppState} appState App state used to persist media * size selection. * @param {!print_preview.DestinationStore} destinationStore Destination store * used to determine if a destination has the media size capability. * @param {!print_preview.DocumentInfo} documentInfo Information about the * document to print. * @param {!print_preview.ticket_items.MarginsType} marginsType Reset when * landscape value changes. * @param {!print_preview.ticket_items.CustomMargins} customMargins Reset when * landscape value changes. * @constructor * @extends {print_preview.ticket_items.TicketItem} */ function MediaSize( appState, destinationStore, documentInfo, marginsType, customMargins) { print_preview.ticket_items.TicketItem.call( this, appState, print_preview.AppStateField.MEDIA_SIZE, destinationStore, documentInfo); /** * Margins ticket item. Reset when this item changes. * @private {!print_preview.ticket_items.MarginsType} */ this.marginsType_ = marginsType; /** * Custom margins ticket item. Reset when this item changes. * @private {!print_preview.ticket_items.CustomMargins} */ this.customMargins_ = customMargins; } MediaSize.prototype = { __proto__: print_preview.ticket_items.TicketItem.prototype, /** @override */ wouldValueBeValid: function(value) { if (!this.isCapabilityAvailable()) { return false; } return this.capability.option.some(function(option) { return option.width_microns == value.width_microns && option.height_microns == value.height_microns && option.is_continuous_feed == value.is_continuous_feed && option.vendor_id == value.vendor_id; }); }, /** @override */ isCapabilityAvailable: function() { var knownSizeToSaveAsPdf = (!this.getDocumentInfoInternal().isModifiable || this.getDocumentInfoInternal().hasCssMediaStyles) && this.getSelectedDestInternal() && this.getSelectedDestInternal().id == print_preview.Destination.GooglePromotedId.SAVE_AS_PDF; return !knownSizeToSaveAsPdf && !!this.capability; }, /** @override */ isValueEqual: function(value) { var myValue = this.getValue(); return myValue.width_microns == value.width_microns && myValue.height_microns == value.height_microns && myValue.is_continuous_feed == value.is_continuous_feed && myValue.vendor_id == value.vendor_id; }, /** @return {Object} Media size capability of the selected destination. */ get capability() { var destination = this.getSelectedDestInternal(); return (destination && destination.capabilities && destination.capabilities.printer && destination.capabilities.printer.media_size) || null; }, /** @override */ getDefaultValueInternal: function() { var defaultOptions = this.capability.option.filter(function(option) { return option.is_default; }); return defaultOptions.length > 0 ? defaultOptions[0] : null; }, /** @override */ getCapabilityNotAvailableValueInternal: function() { return {}; }, /** @override */ updateValueInternal: function(value) { var updateMargins = !this.isValueEqual(value); print_preview.ticket_items.TicketItem.prototype.updateValueInternal.call( this, value); if (updateMargins) { // Reset the user set margins when media size changes. this.marginsType_.updateValue( print_preview.ticket_items.MarginsTypeValue.DEFAULT); this.customMargins_.updateValue(null); } } }; // Export return {MediaSize: MediaSize}; });
'use strict'; Object.defineProperty(exports, '__esModule', { value: true }); var isArray = require('./isArray'); var isArrayStrict = require('./isArrayStrict'); var isBe = require('./isBe'); var isBigint = require('./isBigint'); var isBoolean = require('./isBoolean'); var isFalse = require('./isFalse'); var isFunction = require('./isFunction'); var isNativeFunction = require('./isNativeFunction'); var isNill = require('./isNill'); var isNull = require('./isNull'); var isNumber = require('./isNumber'); var isObject = require('./isObject'); var isObjectStrict = require('./isObjectStrict'); var isPromise = require('./isPromise'); var isRegExp = require('./isRegExp'); var isString = require('./isString'); var isSymbol = require('./isSymbol'); var isTrue = require('./isTrue'); var isUndefined = require('./isUndefined'); var isVoid = require('./isVoid'); Object.defineProperty(exports, 'isArray', { enumerable: true, get: function () { return isArray.isArray; } }); Object.defineProperty(exports, 'isArrayStrict', { enumerable: true, get: function () { return isArrayStrict.isArrayStrict; } }); Object.defineProperty(exports, 'isBe', { enumerable: true, get: function () { return isBe.isBe; } }); Object.defineProperty(exports, 'isBigint', { enumerable: true, get: function () { return isBigint.isBigint; } }); Object.defineProperty(exports, 'isBoolean', { enumerable: true, get: function () { return isBoolean.isBoolean; } }); Object.defineProperty(exports, 'isFalse', { enumerable: true, get: function () { return isFalse.isFalse; } }); Object.defineProperty(exports, 'isFunction', { enumerable: true, get: function () { return isFunction.isFunction; } }); Object.defineProperty(exports, 'isNativeFunction', { enumerable: true, get: function () { return isNativeFunction.isNativeFunction; } }); Object.defineProperty(exports, 'isNill', { enumerable: true, get: function () { return isNill.isNill; } }); Object.defineProperty(exports, 'isNull', { enumerable: true, get: function () { return isNull.isNull; } }); Object.defineProperty(exports, 'isNumber', { enumerable: true, get: function () { return isNumber.isNumber; } }); Object.defineProperty(exports, 'isObject', { enumerable: true, get: function () { return isObject.isObject; } }); Object.defineProperty(exports, 'isObjectStrict', { enumerable: true, get: function () { return isObjectStrict.isObjectStrict; } }); Object.defineProperty(exports, 'isPromise', { enumerable: true, get: function () { return isPromise.isPromise; } }); Object.defineProperty(exports, 'isRegExp', { enumerable: true, get: function () { return isRegExp.isRegExp; } }); Object.defineProperty(exports, 'isString', { enumerable: true, get: function () { return isString.isString; } }); Object.defineProperty(exports, 'isSymbol', { enumerable: true, get: function () { return isSymbol.isSymbol; } }); Object.defineProperty(exports, 'isTrue', { enumerable: true, get: function () { return isTrue.isTrue; } }); Object.defineProperty(exports, 'isUndefined', { enumerable: true, get: function () { return isUndefined.isUndefined; } }); Object.defineProperty(exports, 'isVoid', { enumerable: true, get: function () { return isVoid.isVoid; } });
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import numpy as np import paddle from paddle.fluid import layers from paddle2onnx.op_mapper import CustomPaddleOp, register_custom_paddle_op from paddle2onnx import utils from paddle2onnx.constant import dtypes class DeformConv2d(CustomPaddleOp): def check_attribute(self, node): utils.compare_attr_between_dims( node.attr('strides'), (0, 1), 'strides', 'equal') utils.compare_attr_between_dims( node.attr('paddings'), (0, 1), 'paddings', 'equal') utils.compare_attr_between_dims( node.input_shape('Offset', 0), (2, 3), 'Offset', 'equal') utils.compare_attr( node.attr('deformable_groups'), 1, 'deformable_groups', 'equal') def __init__(self, node, **kw): super(DeformConv2d, self).__init__(node) self.check_attribute(node) self.in_channel = node.input_shape('Input', 0)[1] self.offset_channel = node.input_shape('Offset', 0)[1] self.stride = node.attr('strides')[0] self.padding = node.attr('paddings') if len(self.padding) == 2: self.padding += self.padding self.groups = node.attr('groups') self.dilation = node.attr('dilations')[0] self.padded_x_h = node.input_shape('Input', 0)[2] self.padded_x_w = node.input_shape('Input', 0)[3] if self.padded_x_h > 0: self.padded_x_h = self.padded_x_h + self.padding[0] + self.padding[1] if self.padded_x_w > 0: self.padded_x_w = self.padded_x_w + self.padding[2] + self.padding[3] self.kernel_size = node.input_shape('Filter', 0)[2] self.N = self.kernel_size**2 self.num_filters = node.input_shape('Filter', 0)[0] def forward(self): input = self.input('Input', 0) weight = self.input('Filter', 0) mask = self.input('Mask', 0) offset = self.input('Offset', 0) input = layers.pad2d(input, self.padding) input_shape = paddle.shape(input) if self.padded_x_h < 0 or self.padded_x_w < 0: self.padded_x_h = input_shape[2] self.padded_x_w = input_shape[3] offset_x = paddle.strided_slice( offset, axes=[1], starts=[0], ends=[self.offset_channel], strides=[2]) offset_y = paddle.strided_slice( offset, axes=[1], starts=[1], ends=[self.offset_channel], strides=[2]) offset = paddle.concat([offset_x, offset_y], axis=1) offset_shape = paddle.shape(offset) offset_h = offset_shape[2] offset_w = offset_shape[3] coordinate = self.get_offset_coordinate(offset, 'float32', offset_shape) coordinate = coordinate.transpose((0, 2, 3, 1)) coord_lt, coord_rb, coord_lb, coord_rt = self.get_bilinear_corner_coordinate( coordinate, self.padded_x_h, self.padded_x_w) # clip coordinate coordinate = paddle.concat( [ paddle.clip(coordinate[:, :, :, :self.N], 0, self.padded_x_h - 1), paddle.clip(coordinate[:, :, :, self.N:], 0, self.padded_x_w - 1) ], axis=-1) cof_lt, cof_rb, cof_lb, cof_rt = self.get_bilinear_coefficient( coord_lt, coord_rb, coord_lb, coord_rt, coordinate) feature_lt = self.get_feature_by_coordinate(input, coord_lt, offset_h, offset_w, self.padded_x_w) feature_rb = self.get_feature_by_coordinate(input, coord_rb, offset_h, offset_w, self.padded_x_w) feature_lb = 
self.get_feature_by_coordinate(input, coord_lb, offset_h, offset_w, self.padded_x_w) feature_rt = self.get_feature_by_coordinate(input, coord_rt, offset_h, offset_w, self.padded_x_w) feature_after_deformation = paddle.unsqueeze(cof_lt, 1) * feature_lt + \ paddle.unsqueeze(cof_rb, 1) * feature_rb + \ paddle.unsqueeze(cof_lb, 1) * feature_lb + \ paddle.unsqueeze(cof_rt, 1) * feature_rt # modulation if mask is not None: mask = paddle.transpose(mask, (0, 2, 3, 1)) mask = paddle.unsqueeze(mask, 1) mask = paddle.tile(mask, [1, self.in_channel, 1, 1, 1]) feature_after_deformation *= mask feature_after_deformation = self.reshape_feature( feature_after_deformation, offset_h, offset_w) out = paddle.nn.functional.conv2d( feature_after_deformation, weight, stride=self.kernel_size, groups=self.groups) return {'Output': [out]} def get_offset_coordinate(self, offset, dtype, offset_shape): kernel_grid_origin_x = paddle.arange( 0, self.kernel_size + (self.kernel_size - 1) * (self.dilation - 1), step=self.dilation, dtype=dtype) kernel_grid_origin_x = kernel_grid_origin_x.unsqueeze(1) kernel_grid_origin_x = paddle.tile(kernel_grid_origin_x, [1, self.kernel_size]) kernel_grid_origin_y = paddle.arange( 0, self.kernel_size + (self.kernel_size - 1) * (self.dilation - 1), step=self.dilation, dtype=dtype) kernel_grid_origin_y = kernel_grid_origin_y.unsqueeze(0) kernel_grid_origin_y = paddle.tile(kernel_grid_origin_y, [self.kernel_size, 1]) kernel_grid_origin_x = paddle.reshape(kernel_grid_origin_x, [-1]) kernel_grid_origin_y = paddle.reshape(kernel_grid_origin_y, [-1]) kernel_grid_origin = paddle.concat( [kernel_grid_origin_x, kernel_grid_origin_y], -1) kernel_grid_origin = paddle.reshape(kernel_grid_origin, (1, 2 * self.N, 1, 1)) kernel_offset_x = paddle.arange( 0, offset_shape[2] * self.stride, step=self.stride, dtype=dtype) kernel_offset_x = kernel_offset_x.unsqueeze(1) kernel_offset_x = paddle.expand(kernel_offset_x, offset_shape[2:]) kernel_offset_y = paddle.arange( 0, offset_shape[3] * self.stride, step=self.stride, dtype=dtype) kernel_offset_y = kernel_offset_y.unsqueeze(0) kernel_offset_y = paddle.expand(kernel_offset_y, offset_shape[2:]) kernel_offset_x = kernel_offset_x.unsqueeze([0, 1]) kernel_offset_x = paddle.tile(kernel_offset_x, (1, self.N, 1, 1)) kernel_offset_y = kernel_offset_y.unsqueeze([0, 1]) kernel_offset_y = paddle.tile(kernel_offset_y, (1, self.N, 1, 1)) kernel_offset = paddle.concat([kernel_offset_x, kernel_offset_y], 1) offset = offset + paddle.cast(kernel_offset, 'float32') + paddle.cast( kernel_grid_origin, 'float32') return offset def get_bilinear_corner_coordinate(self, coord, padded_h, padded_w): coord_lt = coord.floor() coord_rb = coord_lt + 1 coord_lt = paddle.cast( paddle.concat( [ paddle.clip(coord_lt[:, :, :, :self.N], 0, padded_h - 1), paddle.clip(coord_lt[:, :, :, self.N:], 0, padded_w - 1) ], axis=-1), dtype='int64') coord_rb = paddle.cast( paddle.concat( [ paddle.clip(coord_rb[:, :, :, :self.N], 0, padded_h - 1), paddle.clip(coord_rb[:, :, :, self.N:], 0, padded_w - 1) ], axis=-1), dtype='int64') coord_lb = paddle.concat( [coord_lt[:, :, :, :self.N], coord_rb[:, :, :, self.N:]], axis=-1) coord_rt = paddle.concat( [coord_rb[:, :, :, :self.N], coord_lt[:, :, :, self.N:]], axis=-1) return coord_lt, coord_rb, coord_lb, coord_rt def get_bilinear_coefficient(self, coord_lt, coord_rb, coord_lb, coord_rt, p): cof_lt = (1 + (paddle.cast( coord_lt[:, :, :, :self.N], dtype='float32') - p[:, :, :, :self.N]) ) * (1 + paddle.cast( coord_lt[:, :, :, self.N:], dtype='float32') - p[:, :, :, 
self.N:]) cof_rb = (1 - (paddle.cast( coord_rb[:, :, :, :self.N], dtype='float32') - p[:, :, :, :self.N]) ) * (1 - (paddle.cast( coord_rb[:, :, :, self.N:], dtype='float32') - p[:, :, :, self.N:])) cof_lb = (1 + (paddle.cast( coord_lb[:, :, :, :self.N], dtype='float32') - p[:, :, :, :self.N]) ) * (1 - (paddle.cast( coord_lb[:, :, :, self.N:], dtype='float32') - p[:, :, :, self.N:])) cof_rt = (1 - (paddle.cast( coord_rt[:, :, :, :self.N], dtype='float32') - p[:, :, :, :self.N]) ) * (1 + paddle.cast( coord_rt[:, :, :, self.N:], dtype='float32') - p[:, :, :, self.N:]) return cof_lt, cof_rb, cof_lb, cof_rt def get_feature_by_coordinate(self, x, coord, offset_h, offset_w, padded_x_w): x = paddle.reshape(x, [0, 0, -1]) index = paddle.cast( coord[:, :, :, :self.N] * padded_x_w, dtype='int64') + coord[:, :, :, self.N:] # offset_x*w + offset_y index = paddle.unsqueeze(index, 1) index = paddle.tile(index, [1, self.in_channel, 1, 1, 1]) index = paddle.reshape(index, (0, 0, -1)) x_range = list(range(3)) dim = 2 x_range[0] = dim x_range[dim] = 0 x_swaped = paddle.transpose(x, perm=x_range) index_range = list(range(3)) index_range[0] = dim index_range[dim] = 0 index_swaped = paddle.transpose(index, perm=index_range) x_shape = layers.shape(x_swaped) index_shape = layers.shape(index_swaped) prod = paddle.prod(x_shape[1:], keepdim=True) x_swaped_flattend = paddle.reshape(x_swaped, [-1]) index_swaped_flattend = paddle.reshape(index_swaped, [-1]) index_swaped_flattend *= prod bias = paddle.arange(start=0, end=prod, step=1, dtype='float32') bias = paddle.tile(bias, index_shape[0]) index_swaped_flattend += bias gathered = paddle.gather(x_swaped_flattend, index_swaped_flattend) gathered = paddle.reshape(gathered, layers.shape(index_swaped)) x_offset = paddle.transpose(gathered, perm=x_range) x_offset = paddle.reshape( x_offset, (-1, self.in_channel, offset_h, offset_w, self.N)) return x_offset def reshape_feature(self, x_offset, offset_h, offset_w): x_offset = paddle.concat( [ paddle.reshape(x_offset[:, :, :, :, s:s + self.kernel_size], ( -1, self.in_channel, offset_h, offset_w * self.kernel_size)) for s in range(0, self.N, self.kernel_size) ], axis=-1) x_offset = paddle.reshape(x_offset, (-1, self.in_channel, offset_h * self.kernel_size, offset_w * self.kernel_size)) return x_offset register_custom_paddle_op('deformable_conv', DeformConv2d)
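The four cof_* products above are the standard bilinear interpolation weights, applied tensor-wide. A small NumPy sketch of the same arithmetic for a single fractional coordinate may make them easier to read; the correspondence to the lt/rb/lb/rt names is approximate, since the Paddle code keeps the x and y halves in separate channel blocks.

import numpy as np

def bilinear_sample(img, py, px):
    """Sample img at fractional (py, px) with bilinear weights."""
    y0, x0 = int(np.floor(py)), int(np.floor(px))  # "left-top" corner
    y1, x1 = y0 + 1, x0 + 1                        # "right-bottom" corner
    wy1, wx1 = py - y0, px - x0                    # fractional parts
    wy0, wx0 = 1.0 - wy1, 1.0 - wx1                # = 1 + (floor(p) - p)
    return (wy0 * wx0 * img[y0, x0] +  # cof_lt
            wy1 * wx1 * img[y1, x1] +  # cof_rb
            wy1 * wx0 * img[y1, x0] +  # cof_lb
            wy0 * wx1 * img[y0, x1])   # cof_rt

img = np.arange(16.0).reshape(4, 4)
print(bilinear_sample(img, 1.25, 2.5))  # 7.5: between rows 1-2, cols 2-3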
/** * The copyright in this software is being made available under the BSD License, * included below. This software may be subject to other third party and contributor * rights, including patent rights, and no such rights are granted under this license. * * Copyright (c) 2013, Dash Industry Forum. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * Neither the name of Dash Industry Forum nor the names of its * contributors may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ import Errors from '../../core/errors/Errors'; import EventBus from '../../core/EventBus'; import Events from '../../core/events/Events'; import BlacklistController from '../controllers/BlacklistController'; import DVBSelector from './baseUrlResolution/DVBSelector'; import BasicSelector from './baseUrlResolution/BasicSelector'; import FactoryMaker from '../../core/FactoryMaker'; import DashJSError from '../vo/DashJSError'; import Constants from '../constants/Constants'; function BaseURLSelector() { const context = this.context; const eventBus = EventBus(context).getInstance(); let dashManifestModel; let instance, serviceLocationBlacklistController, basicSelector, dvbSelector, selector; function setup() { serviceLocationBlacklistController = BlacklistController(context).create({ updateEventName: Events.SERVICE_LOCATION_BLACKLIST_CHANGED, addBlacklistEventName: Events.SERVICE_LOCATION_BLACKLIST_ADD }); basicSelector = BasicSelector(context).create({ blacklistController: serviceLocationBlacklistController }); dvbSelector = DVBSelector(context).create({ blacklistController: serviceLocationBlacklistController }); selector = basicSelector; } function setConfig(config) { if (config.selector) { selector = config.selector; } if (config.dashManifestModel) { dashManifestModel = config.dashManifestModel; } } function checkConfig() { if (!dashManifestModel || !dashManifestModel.hasOwnProperty('getIsDVB')) { throw new Error(Constants.MISSING_CONFIG_ERROR); } } function chooseSelectorFromManifest(manifest) { checkConfig(); if (dashManifestModel.getIsDVB(manifest)) { selector = dvbSelector; } else { selector = basicSelector; } } function select(data) { if (!data) { return; } const baseUrls = data.baseUrls; const selectedIdx = data.selectedIdx; // Once a random selection has been carried out amongst a group of BaseURLs with the same // @priority attribute value, then that choice should be re-used if the selection needs to be made again // unless the blacklist has been modified or the available BaseURLs have changed. if (!isNaN(selectedIdx)) { return baseUrls[selectedIdx]; } let selectedBaseUrl = selector.select(baseUrls); if (!selectedBaseUrl) { eventBus.trigger( Events.URL_RESOLUTION_FAILED, { error: new DashJSError( Errors.URL_RESOLUTION_FAILED_GENERIC_ERROR_CODE, Errors.URL_RESOLUTION_FAILED_GENERIC_ERROR_MESSAGE ) } ); if (selector === basicSelector) { reset(); } return; } data.selectedIdx = baseUrls.indexOf(selectedBaseUrl); return selectedBaseUrl; } function reset() { serviceLocationBlacklistController.reset(); } instance = { chooseSelectorFromManifest: chooseSelectorFromManifest, select: select, reset: reset, setConfig: setConfig }; setup(); return instance; } BaseURLSelector.__dashjs_factory_name = 'BaseURLSelector'; export default FactoryMaker.getClassFactory(BaseURLSelector);
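# Illustrative sketch (hypothetical names, not the dash.js API): select()
# above performs a choice once and then re-uses data.selectedIdx on every
# later call, until reset() clears the blacklist state. Per the comment in
# select(), the initial pick among same-@priority BaseURLs is random. A
# minimal Python version of that sticky-selection behaviour:
import random

class StickyBaseUrlSelector:
    def __init__(self, base_urls):
        self.base_urls = base_urls
        self.selected_idx = None            # mirrors data.selectedIdx

    def select(self):
        if self.selected_idx is not None:   # prior choice is re-used
            return self.base_urls[self.selected_idx]
        if not self.base_urls:
            return None                     # cf. URL_RESOLUTION_FAILED path
        self.selected_idx = random.randrange(len(self.base_urls))
        return self.base_urls[self.selected_idx]

    def reset(self):                        # e.g. after a blacklist change
        self.selected_idx = None

s = StickyBaseUrlSelector(["http://cdn-a/", "http://cdn-b/"])
assert s.select() == s.select()             # stable until reset()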
/*------------------------------------------------------------------------- * * procarray.c * POSTGRES process array code. * * * This module maintains arrays of PGPROC substructures, as well as associated * arrays in ProcGlobal, for all active backends. Although there are several * uses for this, the principal one is as a means of determining the set of * currently running transactions. * * Because of various subtle race conditions it is critical that a backend * hold the correct locks while setting or clearing its xid (in * ProcGlobal->xids[]/MyProc->xid). See notes in * src/backend/access/transam/README. * * The process arrays now also include structures representing prepared * transactions. The xid and subxids fields of these are valid, as are the * myProcLocks lists. They can be distinguished from regular backend PGPROCs * at need by checking for pid == 0. * * During hot standby, we also keep a list of XIDs representing transactions * that are known to be running on the primary (or more precisely, were running * as of the current point in the WAL stream). This list is kept in the * KnownAssignedXids array, and is updated by watching the sequence of * arriving XIDs. This is necessary because if we leave those XIDs out of * snapshots taken for standby queries, then they will appear to be already * complete, leading to MVCC failures. Note that in hot standby, the PGPROC * array represents standby processes, which by definition are not running * transactions that have XIDs. * * It is perhaps possible for a backend on the primary to terminate without * writing an abort record for its transaction. While that shouldn't really * happen, it would tie up KnownAssignedXids indefinitely, so we protect * ourselves by pruning the array when a valid list of running XIDs arrives. * * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/storage/ipc/procarray.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include <signal.h> #include "access/clog.h" #include "access/subtrans.h" #include "access/transam.h" #include "access/twophase.h" #include "access/xact.h" #include "access/xlog.h" #include "catalog/catalog.h" #include "catalog/pg_authid.h" #include "commands/dbcommands.h" #include "miscadmin.h" #include "pgstat.h" #include "storage/proc.h" #include "storage/procarray.h" #include "storage/spin.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/rel.h" #include "utils/snapmgr.h" #define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var)))) /* Our shared memory area */ typedef struct ProcArrayStruct { int numProcs; /* number of valid procs entries */ int maxProcs; /* allocated size of procs array */ /* * Known assigned XIDs handling */ int maxKnownAssignedXids; /* allocated size of array */ int numKnownAssignedXids; /* current # of valid entries */ int tailKnownAssignedXids; /* index of oldest valid element */ int headKnownAssignedXids; /* index of newest element, + 1 */ slock_t known_assigned_xids_lck; /* protects head/tail pointers */ /* * Highest subxid that has been removed from KnownAssignedXids array to * prevent overflow; or InvalidTransactionId if none. We track this for * similar reasons to tracking overflowing cached subxids in PGPROC * entries. Must hold exclusive ProcArrayLock to change this, and shared * lock to read it. 
*/ TransactionId lastOverflowedXid; /* oldest xmin of any replication slot */ TransactionId replication_slot_xmin; /* oldest catalog xmin of any replication slot */ TransactionId replication_slot_catalog_xmin; /* indexes into allProcs[], has PROCARRAY_MAXPROCS entries */ int pgprocnos[FLEXIBLE_ARRAY_MEMBER]; } ProcArrayStruct; /* * State for the GlobalVisTest* family of functions. Those functions can * e.g. be used to decide if a deleted row can be removed without violating * MVCC semantics: If the deleted row's xmax is not considered to be running * by anyone, the row can be removed. * * To avoid slowing down GetSnapshotData(), we don't calculate a precise * cutoff XID while building a snapshot (looking at the frequently changing * xmins scales badly). Instead we compute two boundaries while building the * snapshot: * * 1) definitely_needed, indicating that rows deleted by XIDs >= * definitely_needed are definitely still visible. * * 2) maybe_needed, indicating that rows deleted by XIDs < maybe_needed can * definitely be removed * * When testing an XID that falls in between the two (i.e. XID >= maybe_needed * && XID < definitely_needed), the boundaries can be recomputed (using * ComputeXidHorizons()) to get a more accurate answer. This is cheaper than * maintaining an accurate value all the time. * * As it is not cheap to compute accurate boundaries, we limit the number of * times that happens in short succession. See GlobalVisTestShouldUpdate(). * * * There are three backend lifetime instances of this struct, optimized for * different types of relations. As e.g. a normal user defined table in one * database is inaccessible to backends connected to another database, a test * specific to a relation can be more aggressive than a test for a shared * relation. Currently we track four different states: * * 1) GlobalVisSharedRels, which only considers an XID's * effects visible-to-everyone if neither snapshots in any database, nor a * replication slot's xmin, nor a replication slot's catalog_xmin might * still consider XID as running. * * 2) GlobalVisCatalogRels, which only considers an XID's * effects visible-to-everyone if neither snapshots in the current * database, nor a replication slot's xmin, nor a replication slot's * catalog_xmin might still consider XID as running. * * I.e. the difference to GlobalVisSharedRels is that * snapshot in other databases are ignored. * * 3) GlobalVisDataRels, which only considers an XID's * effects visible-to-everyone if neither snapshots in the current * database, nor a replication slot's xmin consider XID as running. * * I.e. the difference to GlobalVisCatalogRels is that * replication slot's catalog_xmin is not taken into account. * * 4) GlobalVisTempRels, which only considers the current session, as temp * tables are not visible to other sessions. * * GlobalVisTestFor(relation) returns the appropriate state * for the relation. * * The boundaries are FullTransactionIds instead of TransactionIds to avoid * wraparound dangers. There e.g. would otherwise exist no procarray state to * prevent maybe_needed to become old enough after the GetSnapshotData() * call. * * The typedef is in the header. */ struct GlobalVisState { /* XIDs >= are considered running by some backend */ FullTransactionId definitely_needed; /* XIDs < are not considered to be running by any backend */ FullTransactionId maybe_needed; }; /* * Result of ComputeXidHorizons(). 
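 *
 * The "*_oldest_nonremovable" fields below are cutoffs: tuples deleted by
 * XIDs older than the cutoff for the relevant relation class may be
 * removed, while newer deletions must still be retained.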
*/ typedef struct ComputeXidHorizonsResult { /* * The value of ShmemVariableCache->latestCompletedXid when * ComputeXidHorizons() held ProcArrayLock. */ FullTransactionId latest_completed; /* * The same for procArray->replication_slot_xmin and. * procArray->replication_slot_catalog_xmin. */ TransactionId slot_xmin; TransactionId slot_catalog_xmin; /* * Oldest xid that any backend might still consider running. This needs to * include processes running VACUUM, in contrast to the normal visibility * cutoffs, as vacuum needs to be able to perform pg_subtrans lookups when * determining visibility, but doesn't care about rows above its xmin to * be removed. * * This likely should only be needed to determine whether pg_subtrans can * be truncated. It currently includes the effects of replication slots, * for historical reasons. But that could likely be changed. */ TransactionId oldest_considered_running; /* * Oldest xid for which deleted tuples need to be retained in shared * tables. * * This includes the effects of replication slots. If that's not desired, * look at shared_oldest_nonremovable_raw; */ TransactionId shared_oldest_nonremovable; /* * Oldest xid that may be necessary to retain in shared tables. This is * the same as shared_oldest_nonremovable, except that is not affected by * replication slot's catalog_xmin. * * This is mainly useful to be able to send the catalog_xmin to upstream * streaming replication servers via hot_standby_feedback, so they can * apply the limit only when accessing catalog tables. */ TransactionId shared_oldest_nonremovable_raw; /* * Oldest xid for which deleted tuples need to be retained in non-shared * catalog tables. */ TransactionId catalog_oldest_nonremovable; /* * Oldest xid for which deleted tuples need to be retained in normal user * defined tables. */ TransactionId data_oldest_nonremovable; /* * Oldest xid for which deleted tuples need to be retained in this * session's temporary tables. */ TransactionId temp_oldest_nonremovable; } ComputeXidHorizonsResult; /* * Return value for GlobalVisHorizonKindForRel(). */ typedef enum GlobalVisHorizonKind { VISHORIZON_SHARED, VISHORIZON_CATALOG, VISHORIZON_DATA, VISHORIZON_TEMP } GlobalVisHorizonKind; static ProcArrayStruct *procArray; static PGPROC *allProcs; /* * Bookkeeping for tracking emulated transactions in recovery */ static TransactionId *KnownAssignedXids; static bool *KnownAssignedXidsValid; static TransactionId latestObservedXid = InvalidTransactionId; /* * If we're in STANDBY_SNAPSHOT_PENDING state, standbySnapshotPendingXmin is * the highest xid that might still be running that we don't have in * KnownAssignedXids. */ static TransactionId standbySnapshotPendingXmin; /* * State for visibility checks on different types of relations. See struct * GlobalVisState for details. As shared, catalog, normal and temporary * relations can have different horizons, one such state exists for each. */ static GlobalVisState GlobalVisSharedRels; static GlobalVisState GlobalVisCatalogRels; static GlobalVisState GlobalVisDataRels; static GlobalVisState GlobalVisTempRels; /* * This backend's RecentXmin at the last time the accurate xmin horizon was * recomputed, or InvalidTransactionId if it has not. Used to limit how many * times accurate horizons are recomputed. See GlobalVisTestShouldUpdate(). 
*/ static TransactionId ComputeXidHorizonsResultLastXmin; snapshot_hook_type snapshot_hook = NULL; #ifdef XIDCACHE_DEBUG /* counters for XidCache measurement */ static long xc_by_recent_xmin = 0; static long xc_by_known_xact = 0; static long xc_by_my_xact = 0; static long xc_by_latest_xid = 0; static long xc_by_main_xid = 0; static long xc_by_child_xid = 0; static long xc_by_known_assigned = 0; static long xc_no_overflow = 0; static long xc_slow_answer = 0; #define xc_by_recent_xmin_inc() (xc_by_recent_xmin++) #define xc_by_known_xact_inc() (xc_by_known_xact++) #define xc_by_my_xact_inc() (xc_by_my_xact++) #define xc_by_latest_xid_inc() (xc_by_latest_xid++) #define xc_by_main_xid_inc() (xc_by_main_xid++) #define xc_by_child_xid_inc() (xc_by_child_xid++) #define xc_by_known_assigned_inc() (xc_by_known_assigned++) #define xc_no_overflow_inc() (xc_no_overflow++) #define xc_slow_answer_inc() (xc_slow_answer++) static void DisplayXidCache(void); #else /* !XIDCACHE_DEBUG */ #define xc_by_recent_xmin_inc() ((void) 0) #define xc_by_known_xact_inc() ((void) 0) #define xc_by_my_xact_inc() ((void) 0) #define xc_by_latest_xid_inc() ((void) 0) #define xc_by_main_xid_inc() ((void) 0) #define xc_by_child_xid_inc() ((void) 0) #define xc_by_known_assigned_inc() ((void) 0) #define xc_no_overflow_inc() ((void) 0) #define xc_slow_answer_inc() ((void) 0) #endif /* XIDCACHE_DEBUG */ /* Primitives for KnownAssignedXids array handling for standby */ static void KnownAssignedXidsCompress(bool force); static void KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid, bool exclusive_lock); static bool KnownAssignedXidsSearch(TransactionId xid, bool remove); static bool KnownAssignedXidExists(TransactionId xid); static void KnownAssignedXidsRemove(TransactionId xid); static void KnownAssignedXidsRemoveTree(TransactionId xid, int nsubxids, TransactionId *subxids); static void KnownAssignedXidsRemovePreceding(TransactionId xid); static int KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax); static int KnownAssignedXidsGetAndSetXmin(TransactionId *xarray, TransactionId *xmin, TransactionId xmax); static TransactionId KnownAssignedXidsGetOldestXmin(void); static void KnownAssignedXidsDisplay(int trace_level); static void KnownAssignedXidsReset(void); static inline void ProcArrayEndTransactionInternal(PGPROC *proc, TransactionId latestXid); static void ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid); static void MaintainLatestCompletedXid(TransactionId latestXid); static void MaintainLatestCompletedXidRecovery(TransactionId latestXid); static inline FullTransactionId FullXidRelativeTo(FullTransactionId rel, TransactionId xid); static void GlobalVisUpdateApply(ComputeXidHorizonsResult *horizons); /* * Report shared-memory space needed by CreateSharedProcArray. */ Size ProcArrayShmemSize(void) { Size size; /* Size of the ProcArray structure itself */ #define PROCARRAY_MAXPROCS (MaxBackends + max_prepared_xacts) size = offsetof(ProcArrayStruct, pgprocnos); size = add_size(size, mul_size(sizeof(int), PROCARRAY_MAXPROCS)); /* * During Hot Standby processing we have a data structure called * KnownAssignedXids, created in shared memory. Local data structures are * also created in various backends during GetSnapshotData(), * TransactionIdIsInProgress() and GetRunningTransactionData(). All of the * main structures created in those functions must be identically sized, * since we may at times copy the whole of the data structures around. We * refer to this size as TOTAL_MAX_CACHED_SUBXIDS. 
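 *
 * (Each proc can cache up to PGPROC_MAX_CACHED_SUBXIDS subtransaction
 * XIDs plus its own top-level XID, hence the "+ 1" in the definition
 * below.)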
* * Ideally we'd only create this structure if we were actually doing hot * standby in the current run, but we don't know that yet at the time * shared memory is being set up. */ #define TOTAL_MAX_CACHED_SUBXIDS \ ((PGPROC_MAX_CACHED_SUBXIDS + 1) * PROCARRAY_MAXPROCS) if (EnableHotStandby) { size = add_size(size, mul_size(sizeof(TransactionId), TOTAL_MAX_CACHED_SUBXIDS)); size = add_size(size, mul_size(sizeof(bool), TOTAL_MAX_CACHED_SUBXIDS)); } return size; } /* * Initialize the shared PGPROC array during postmaster startup. */ void CreateSharedProcArray(void) { bool found; /* Create or attach to the ProcArray shared structure */ procArray = (ProcArrayStruct *) ShmemInitStruct("Proc Array", add_size(offsetof(ProcArrayStruct, pgprocnos), mul_size(sizeof(int), PROCARRAY_MAXPROCS)), &found); if (!found) { /* * We're the first - initialize. */ procArray->numProcs = 0; procArray->maxProcs = PROCARRAY_MAXPROCS; procArray->maxKnownAssignedXids = TOTAL_MAX_CACHED_SUBXIDS; procArray->numKnownAssignedXids = 0; procArray->tailKnownAssignedXids = 0; procArray->headKnownAssignedXids = 0; SpinLockInit(&procArray->known_assigned_xids_lck); procArray->lastOverflowedXid = InvalidTransactionId; procArray->replication_slot_xmin = InvalidTransactionId; procArray->replication_slot_catalog_xmin = InvalidTransactionId; ShmemVariableCache->xactCompletionCount = 1; } allProcs = ProcGlobal->allProcs; /* Create or attach to the KnownAssignedXids arrays too, if needed */ if (EnableHotStandby) { KnownAssignedXids = (TransactionId *) ShmemInitStruct("KnownAssignedXids", mul_size(sizeof(TransactionId), TOTAL_MAX_CACHED_SUBXIDS), &found); KnownAssignedXidsValid = (bool *) ShmemInitStruct("KnownAssignedXidsValid", mul_size(sizeof(bool), TOTAL_MAX_CACHED_SUBXIDS), &found); } } /* * Add the specified PGPROC to the shared array. */ void ProcArrayAdd(PGPROC *proc) { ProcArrayStruct *arrayP = procArray; int index; int movecount; /* See ProcGlobal comment explaining why both locks are held */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); LWLockAcquire(XidGenLock, LW_EXCLUSIVE); if (arrayP->numProcs >= arrayP->maxProcs) { /* * Oops, no room. (This really shouldn't happen, since there is a * fixed supply of PGPROC structs too, and so we should have failed * earlier.) */ ereport(FATAL, (errcode(ERRCODE_TOO_MANY_CONNECTIONS), errmsg("sorry, too many clients already"))); } /* * Keep the procs array sorted by (PGPROC *) so that we can utilize * locality of references much better. This is useful while traversing the * ProcArray because there is an increased likelihood of finding the next * PGPROC structure in the cache. 
* * Since the occurrence of adding/removing a proc is much lower than the * access to the ProcArray itself, the overhead should be marginal */ for (index = 0; index < arrayP->numProcs; index++) { int procno PG_USED_FOR_ASSERTS_ONLY = arrayP->pgprocnos[index]; Assert(procno >= 0 && procno < (arrayP->maxProcs + NUM_AUXILIARY_PROCS)); Assert(allProcs[procno].pgxactoff == index); /* If we have found our right position in the array, break */ if (arrayP->pgprocnos[index] > proc->pgprocno) break; } movecount = arrayP->numProcs - index; memmove(&arrayP->pgprocnos[index + 1], &arrayP->pgprocnos[index], movecount * sizeof(*arrayP->pgprocnos)); memmove(&ProcGlobal->xids[index + 1], &ProcGlobal->xids[index], movecount * sizeof(*ProcGlobal->xids)); memmove(&ProcGlobal->subxidStates[index + 1], &ProcGlobal->subxidStates[index], movecount * sizeof(*ProcGlobal->subxidStates)); memmove(&ProcGlobal->statusFlags[index + 1], &ProcGlobal->statusFlags[index], movecount * sizeof(*ProcGlobal->statusFlags)); arrayP->pgprocnos[index] = proc->pgprocno; proc->pgxactoff = index; ProcGlobal->xids[index] = proc->xid; ProcGlobal->subxidStates[index] = proc->subxidStatus; ProcGlobal->statusFlags[index] = proc->statusFlags; arrayP->numProcs++; /* adjust pgxactoff for all following PGPROCs */ index++; for (; index < arrayP->numProcs; index++) { int procno = arrayP->pgprocnos[index]; Assert(procno >= 0 && procno < (arrayP->maxProcs + NUM_AUXILIARY_PROCS)); Assert(allProcs[procno].pgxactoff == index - 1); allProcs[procno].pgxactoff = index; } /* * Release in reversed acquisition order, to reduce frequency of having to * wait for XidGenLock while holding ProcArrayLock. */ LWLockRelease(XidGenLock); LWLockRelease(ProcArrayLock); } /* * Remove the specified PGPROC from the shared array. * * When latestXid is a valid XID, we are removing a live 2PC gxact from the * array, and thus causing it to appear as "not running" anymore. In this * case we must advance latestCompletedXid. (This is essentially the same * as ProcArrayEndTransaction followed by removal of the PGPROC, but we take * the ProcArrayLock only once, and don't damage the content of the PGPROC; * twophase.c depends on the latter.) */ void ProcArrayRemove(PGPROC *proc, TransactionId latestXid) { ProcArrayStruct *arrayP = procArray; int myoff; int movecount; #ifdef XIDCACHE_DEBUG /* dump stats at backend shutdown, but not prepared-xact end */ if (proc->pid != 0) DisplayXidCache(); #endif /* See ProcGlobal comment explaining why both locks are held */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); LWLockAcquire(XidGenLock, LW_EXCLUSIVE); myoff = proc->pgxactoff; Assert(myoff >= 0 && myoff < arrayP->numProcs); Assert(ProcGlobal->allProcs[arrayP->pgprocnos[myoff]].pgxactoff == myoff); if (TransactionIdIsValid(latestXid)) { Assert(TransactionIdIsValid(ProcGlobal->xids[myoff])); /* Advance global latestCompletedXid while holding the lock */ MaintainLatestCompletedXid(latestXid); /* Same with xactCompletionCount */ ShmemVariableCache->xactCompletionCount++; ProcGlobal->xids[myoff] = InvalidTransactionId; ProcGlobal->subxidStates[myoff].overflowed = false; ProcGlobal->subxidStates[myoff].count = 0; } else { /* Shouldn't be trying to remove a live transaction here */ Assert(!TransactionIdIsValid(ProcGlobal->xids[myoff])); } Assert(!TransactionIdIsValid(ProcGlobal->xids[myoff])); Assert(ProcGlobal->subxidStates[myoff].count == 0); Assert(ProcGlobal->subxidStates[myoff].overflowed == false); ProcGlobal->statusFlags[myoff] = 0; /* Keep the PGPROC array sorted. 
See notes above */ movecount = arrayP->numProcs - myoff - 1; memmove(&arrayP->pgprocnos[myoff], &arrayP->pgprocnos[myoff + 1], movecount * sizeof(*arrayP->pgprocnos)); memmove(&ProcGlobal->xids[myoff], &ProcGlobal->xids[myoff + 1], movecount * sizeof(*ProcGlobal->xids)); memmove(&ProcGlobal->subxidStates[myoff], &ProcGlobal->subxidStates[myoff + 1], movecount * sizeof(*ProcGlobal->subxidStates)); memmove(&ProcGlobal->statusFlags[myoff], &ProcGlobal->statusFlags[myoff + 1], movecount * sizeof(*ProcGlobal->statusFlags)); arrayP->pgprocnos[arrayP->numProcs - 1] = -1; /* for debugging */ arrayP->numProcs--; /* * Adjust pgxactoff of following procs for removed PGPROC (note that * numProcs already has been decremented). */ for (int index = myoff; index < arrayP->numProcs; index++) { int procno = arrayP->pgprocnos[index]; Assert(procno >= 0 && procno < (arrayP->maxProcs + NUM_AUXILIARY_PROCS)); Assert(allProcs[procno].pgxactoff - 1 == index); allProcs[procno].pgxactoff = index; } /* * Release in reversed acquisition order, to reduce frequency of having to * wait for XidGenLock while holding ProcArrayLock. */ LWLockRelease(XidGenLock); LWLockRelease(ProcArrayLock); } /* * ProcArrayEndTransaction -- mark a transaction as no longer running * * This is used interchangeably for commit and abort cases. The transaction * commit/abort must already be reported to WAL and pg_xact. * * proc is currently always MyProc, but we pass it explicitly for flexibility. * latestXid is the latest Xid among the transaction's main XID and * subtransactions, or InvalidTransactionId if it has no XID. (We must ask * the caller to pass latestXid, instead of computing it from the PGPROC's * contents, because the subxid information in the PGPROC might be * incomplete.) */ void ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid) { if (TransactionIdIsValid(latestXid)) { /* * We must lock ProcArrayLock while clearing our advertised XID, so * that we do not exit the set of "running" transactions while someone * else is taking a snapshot. See discussion in * src/backend/access/transam/README. */ Assert(TransactionIdIsValid(proc->xid)); /* * If we can immediately acquire ProcArrayLock, we clear our own XID * and release the lock. If not, use group XID clearing to improve * efficiency. */ if (LWLockConditionalAcquire(ProcArrayLock, LW_EXCLUSIVE)) { ProcArrayEndTransactionInternal(proc, latestXid); LWLockRelease(ProcArrayLock); } else ProcArrayGroupClearXid(proc, latestXid); } else { /* * If we have no XID, we don't need to lock, since we won't affect * anyone else's calculation of a snapshot. We might change their * estimate of global xmin, but that's OK. */ Assert(!TransactionIdIsValid(proc->xid)); Assert(proc->subxidStatus.count == 0); Assert(!proc->subxidStatus.overflowed); proc->lxid = InvalidLocalTransactionId; proc->xmin = InvalidTransactionId; proc->delayChkpt = false; /* be sure this is cleared in abort */ proc->recoveryConflictPending = false; /* must be cleared with xid/xmin: */ /* avoid unnecessarily dirtying shared cachelines */ if (proc->statusFlags & PROC_VACUUM_STATE_MASK) { Assert(!LWLockHeldByMe(ProcArrayLock)); LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); Assert(proc->statusFlags == ProcGlobal->statusFlags[proc->pgxactoff]); proc->statusFlags &= ~PROC_VACUUM_STATE_MASK; ProcGlobal->statusFlags[proc->pgxactoff] = proc->statusFlags; LWLockRelease(ProcArrayLock); } } } /* * Mark a write transaction as no longer running. * * We don't do any locking here; caller must handle that. 
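 * (In practice the caller holds ProcArrayLock in exclusive mode; the
 * function asserts that below.)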
*/ static inline void ProcArrayEndTransactionInternal(PGPROC *proc, TransactionId latestXid) { int pgxactoff = proc->pgxactoff; /* * Note: we need exclusive lock here because we're going to change other * processes' PGPROC entries. */ Assert(LWLockHeldByMeInMode(ProcArrayLock, LW_EXCLUSIVE)); Assert(TransactionIdIsValid(ProcGlobal->xids[pgxactoff])); Assert(ProcGlobal->xids[pgxactoff] == proc->xid); ProcGlobal->xids[pgxactoff] = InvalidTransactionId; proc->xid = InvalidTransactionId; proc->lxid = InvalidLocalTransactionId; proc->xmin = InvalidTransactionId; proc->delayChkpt = false; /* be sure this is cleared in abort */ proc->recoveryConflictPending = false; proc->lastCommittedCSN = pg_atomic_fetch_add_u64(&ShmemVariableCache->nextCommitSeqNo, 1); /* must be cleared with xid/xmin: */ /* avoid unnecessarily dirtying shared cachelines */ if (proc->statusFlags & PROC_VACUUM_STATE_MASK) { proc->statusFlags &= ~PROC_VACUUM_STATE_MASK; ProcGlobal->statusFlags[proc->pgxactoff] = proc->statusFlags; } /* Clear the subtransaction-XID cache too while holding the lock */ Assert(ProcGlobal->subxidStates[pgxactoff].count == proc->subxidStatus.count && ProcGlobal->subxidStates[pgxactoff].overflowed == proc->subxidStatus.overflowed); if (proc->subxidStatus.count > 0 || proc->subxidStatus.overflowed) { ProcGlobal->subxidStates[pgxactoff].count = 0; ProcGlobal->subxidStates[pgxactoff].overflowed = false; proc->subxidStatus.count = 0; proc->subxidStatus.overflowed = false; } /* Also advance global latestCompletedXid while holding the lock */ MaintainLatestCompletedXid(latestXid); /* Same with xactCompletionCount */ ShmemVariableCache->xactCompletionCount++; } /* * ProcArrayGroupClearXid -- group XID clearing * * When we cannot immediately acquire ProcArrayLock in exclusive mode at * commit time, add ourselves to a list of processes that need their XIDs * cleared. The first process to add itself to the list will acquire * ProcArrayLock in exclusive mode and perform ProcArrayEndTransactionInternal * on behalf of all group members. This avoids a great deal of contention * around ProcArrayLock when many processes are trying to commit at once, * since the lock need not be repeatedly handed off from one committing * process to the next. */ static void ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid) { PROC_HDR *procglobal = ProcGlobal; uint32 nextidx; uint32 wakeidx; /* We should definitely have an XID to clear. */ Assert(TransactionIdIsValid(proc->xid)); /* Add ourselves to the list of processes needing a group XID clear. */ proc->procArrayGroupMember = true; proc->procArrayGroupMemberXid = latestXid; nextidx = pg_atomic_read_u32(&procglobal->procArrayGroupFirst); while (true) { pg_atomic_write_u32(&proc->procArrayGroupNext, nextidx); if (pg_atomic_compare_exchange_u32(&procglobal->procArrayGroupFirst, &nextidx, (uint32) proc->pgprocno)) break; } /* * If the list was not empty, the leader will clear our XID. It is * impossible to have followers without a leader because the first process * that has added itself to the list will always have nextidx as * INVALID_PGPROCNO. */ if (nextidx != INVALID_PGPROCNO) { int extraWaits = 0; /* Sleep until the leader clears our XID. 
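 * The semaphore may absorb unrelated wakeups while we wait; extraWaits
 * counts those so the absorbed wakeups can be re-posted once we are done.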
*/ pgstat_report_wait_start(WAIT_EVENT_PROCARRAY_GROUP_UPDATE); for (;;) { /* acts as a read barrier */ PGSemaphoreLock(proc->sem); if (!proc->procArrayGroupMember) break; extraWaits++; } pgstat_report_wait_end(); Assert(pg_atomic_read_u32(&proc->procArrayGroupNext) == INVALID_PGPROCNO); /* Fix semaphore count for any absorbed wakeups */ while (extraWaits-- > 0) PGSemaphoreUnlock(proc->sem); return; } /* We are the leader. Acquire the lock on behalf of everyone. */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); /* * Now that we've got the lock, clear the list of processes waiting for * group XID clearing, saving a pointer to the head of the list. Trying * to pop elements one at a time could lead to an ABA problem. */ nextidx = pg_atomic_exchange_u32(&procglobal->procArrayGroupFirst, INVALID_PGPROCNO); /* Remember head of list so we can perform wakeups after dropping lock. */ wakeidx = nextidx; /* Walk the list and clear all XIDs. */ while (nextidx != INVALID_PGPROCNO) { PGPROC *nextproc = &allProcs[nextidx]; ProcArrayEndTransactionInternal(nextproc, nextproc->procArrayGroupMemberXid); /* Move to next proc in list. */ nextidx = pg_atomic_read_u32(&nextproc->procArrayGroupNext); } /* We're done with the lock now. */ LWLockRelease(ProcArrayLock); /* * Now that we've released the lock, go back and wake everybody up. We * don't do this under the lock so as to keep lock hold times to a * minimum. The system calls we need to perform to wake other processes * up are probably much slower than the simple memory writes we did while * holding the lock. */ while (wakeidx != INVALID_PGPROCNO) { PGPROC *nextproc = &allProcs[wakeidx]; wakeidx = pg_atomic_read_u32(&nextproc->procArrayGroupNext); pg_atomic_write_u32(&nextproc->procArrayGroupNext, INVALID_PGPROCNO); /* ensure all previous writes are visible before follower continues. */ pg_write_barrier(); nextproc->procArrayGroupMember = false; if (nextproc != MyProc) PGSemaphoreUnlock(nextproc->sem); } } /* * ProcArrayClearTransaction -- clear the transaction fields * * This is used after successfully preparing a 2-phase transaction. We are * not actually reporting the transaction's XID as no longer running --- it * will still appear as running because the 2PC's gxact is in the ProcArray * too. We just have to clear out our own PGPROC. */ void ProcArrayClearTransaction(PGPROC *proc) { int pgxactoff; /* * Currently we need to lock ProcArrayLock exclusively here, as we * increment xactCompletionCount below. We also need it at least in shared * mode for pgproc->pgxactoff to stay the same below. * * We could however, as this action does not actually change anyone's view * of the set of running XIDs (our entry is duplicate with the gxact that * has already been inserted into the ProcArray), lower the lock level to * shared if we were to make xactCompletionCount an atomic variable. But * that doesn't seem worth it currently, as a 2PC commit is heavyweight * enough for this not to be the bottleneck. If it ever becomes a * bottleneck it may also be worth considering to combine this with the * subsequent ProcArrayRemove() */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); pgxactoff = proc->pgxactoff; ProcGlobal->xids[pgxactoff] = InvalidTransactionId; proc->xid = InvalidTransactionId; proc->lxid = InvalidLocalTransactionId; proc->xmin = InvalidTransactionId; proc->recoveryConflictPending = false; Assert(!(proc->statusFlags & PROC_VACUUM_STATE_MASK)); Assert(!proc->delayChkpt); /* * Need to increment completion count even though transaction hasn't * really committed yet. 
The reason for that is that GetSnapshotData() * omits the xid of the current transaction, thus without the increment we * otherwise could end up reusing the snapshot later. Which would be bad, * because it might not count the prepared transaction as running. */ ShmemVariableCache->xactCompletionCount++; /* Clear the subtransaction-XID cache too */ Assert(ProcGlobal->subxidStates[pgxactoff].count == proc->subxidStatus.count && ProcGlobal->subxidStates[pgxactoff].overflowed == proc->subxidStatus.overflowed); if (proc->subxidStatus.count > 0 || proc->subxidStatus.overflowed) { ProcGlobal->subxidStates[pgxactoff].count = 0; ProcGlobal->subxidStates[pgxactoff].overflowed = false; proc->subxidStatus.count = 0; proc->subxidStatus.overflowed = false; } LWLockRelease(ProcArrayLock); } /* * Update ShmemVariableCache->latestCompletedXid to point to latestXid if * currently older. */ static void MaintainLatestCompletedXid(TransactionId latestXid) { FullTransactionId cur_latest = ShmemVariableCache->latestCompletedXid; Assert(FullTransactionIdIsValid(cur_latest)); Assert(!RecoveryInProgress()); Assert(LWLockHeldByMe(ProcArrayLock)); if (TransactionIdPrecedes(XidFromFullTransactionId(cur_latest), latestXid)) { ShmemVariableCache->latestCompletedXid = FullXidRelativeTo(cur_latest, latestXid); } Assert(IsBootstrapProcessingMode() || FullTransactionIdIsNormal(ShmemVariableCache->latestCompletedXid)); } /* * Same as MaintainLatestCompletedXid, except for use during WAL replay. */ static void MaintainLatestCompletedXidRecovery(TransactionId latestXid) { FullTransactionId cur_latest = ShmemVariableCache->latestCompletedXid; FullTransactionId rel; Assert(AmStartupProcess() || !IsUnderPostmaster); Assert(LWLockHeldByMe(ProcArrayLock)); /* * Need a FullTransactionId to compare latestXid with. Can't rely on * latestCompletedXid to be initialized in recovery. But in recovery it's * safe to access nextXid without a lock for the startup process. */ rel = ShmemVariableCache->nextXid; Assert(FullTransactionIdIsValid(ShmemVariableCache->nextXid)); if (!FullTransactionIdIsValid(cur_latest) || TransactionIdPrecedes(XidFromFullTransactionId(cur_latest), latestXid)) { ShmemVariableCache->latestCompletedXid = FullXidRelativeTo(rel, latestXid); } Assert(FullTransactionIdIsNormal(ShmemVariableCache->latestCompletedXid)); } /* * ProcArrayInitRecovery -- initialize recovery xid mgmt environment * * Remember up to where the startup process initialized the CLOG and subtrans * so we can ensure it's initialized gaplessly up to the point where necessary * while in recovery. */ void ProcArrayInitRecovery(TransactionId initializedUptoXID) { Assert(standbyState == STANDBY_INITIALIZED); Assert(TransactionIdIsNormal(initializedUptoXID)); /* * we set latestObservedXid to the xid SUBTRANS has been initialized up * to, so we can extend it from that point onwards in * RecordKnownAssignedTransactionIds, and when we get consistent in * ProcArrayApplyRecoveryInfo(). */ latestObservedXid = initializedUptoXID; TransactionIdRetreat(latestObservedXid); } /* * ProcArrayApplyRecoveryInfo -- apply recovery info about xids * * Takes us through 3 states: Initialized, Pending and Ready. * Normal case is to go all the way to Ready straight away, though there * are atypical cases where we need to take it in steps. * * Use the data about running transactions on the primary to create the initial * state of KnownAssignedXids. 
We also use these records to regularly prune * KnownAssignedXids because we know it is possible that some transactions * with FATAL errors fail to write abort records, which could cause eventual * overflow. * * See comments for LogStandbySnapshot(). */ void ProcArrayApplyRecoveryInfo(RunningTransactions running) { TransactionId *xids; int nxids; int i; Assert(standbyState >= STANDBY_INITIALIZED); Assert(TransactionIdIsValid(running->nextXid)); Assert(TransactionIdIsValid(running->oldestRunningXid)); Assert(TransactionIdIsNormal(running->latestCompletedXid)); /* * Remove stale transactions, if any. */ ExpireOldKnownAssignedTransactionIds(running->oldestRunningXid); /* * Remove stale locks, if any. */ StandbyReleaseOldLocks(running->oldestRunningXid); /* * If our snapshot is already valid, nothing else to do... */ if (standbyState == STANDBY_SNAPSHOT_READY) return; /* * If our initial RunningTransactionsData had an overflowed snapshot then * we knew we were missing some subxids from our snapshot. If we continue * to see overflowed snapshots then we might never be able to start up, so * we make another test to see if our snapshot is now valid. We know that * the missing subxids are equal to or earlier than nextXid. After we * initialise we continue to apply changes during recovery, so once the * oldestRunningXid is later than the nextXid from the initial snapshot we * know that we no longer have missing information and can mark the * snapshot as valid. */ if (standbyState == STANDBY_SNAPSHOT_PENDING) { /* * If the snapshot isn't overflowed or if its empty we can reset our * pending state and use this snapshot instead. */ if (!running->subxid_overflow || running->xcnt == 0) { /* * If we have already collected known assigned xids, we need to * throw them away before we apply the recovery snapshot. */ KnownAssignedXidsReset(); standbyState = STANDBY_INITIALIZED; } else { if (TransactionIdPrecedes(standbySnapshotPendingXmin, running->oldestRunningXid)) { standbyState = STANDBY_SNAPSHOT_READY; elog(trace_recovery(DEBUG1), "recovery snapshots are now enabled"); } else elog(trace_recovery(DEBUG1), "recovery snapshot waiting for non-overflowed snapshot or " "until oldest active xid on standby is at least %u (now %u)", standbySnapshotPendingXmin, running->oldestRunningXid); return; } } Assert(standbyState == STANDBY_INITIALIZED); /* * NB: this can be reached at least twice, so make sure new code can deal * with that. */ /* * Nobody else is running yet, but take locks anyhow */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); /* * KnownAssignedXids is sorted so we cannot just add the xids, we have to * sort them first. * * Some of the new xids are top-level xids and some are subtransactions. * We don't call SubTransSetParent because it doesn't matter yet. If we * aren't overflowed then all xids will fit in snapshot and so we don't * need subtrans. If we later overflow, an xid assignment record will add * xids to subtrans. If RunningTransactionsData is overflowed then we * don't have enough information to correctly update subtrans anyway. */ /* * Allocate a temporary array to avoid modifying the array passed as * argument. */ xids = palloc(sizeof(TransactionId) * (running->xcnt + running->subxcnt)); /* * Add to the temp array any xids which have not already completed. 
*/ nxids = 0; for (i = 0; i < running->xcnt + running->subxcnt; i++) { TransactionId xid = running->xids[i]; /* * The running-xacts snapshot can contain xids that were still visible * in the procarray when the snapshot was taken, but were already * WAL-logged as completed. They're not running anymore, so ignore * them. */ if (TransactionIdDidCommit(xid) || TransactionIdDidAbort(xid)) continue; xids[nxids++] = xid; } if (nxids > 0) { if (procArray->numKnownAssignedXids != 0) { LWLockRelease(ProcArrayLock); elog(ERROR, "KnownAssignedXids is not empty"); } /* * Sort the array so that we can add them safely into * KnownAssignedXids. * * We have to sort them logically, because in KnownAssignedXidsAdd we * call TransactionIdFollowsOrEquals and so on. But we know these XIDs * come from RUNNING_XACTS, which means there are only normal XIDs from * the same epoch, so this is safe. */ qsort(xids, nxids, sizeof(TransactionId), xidLogicalComparator); /* * Add the sorted snapshot into KnownAssignedXids. The running-xacts * snapshot may include duplicated xids because of prepared * transactions, so ignore them. */ for (i = 0; i < nxids; i++) { if (i > 0 && TransactionIdEquals(xids[i - 1], xids[i])) { elog(DEBUG1, "found duplicated transaction %u for KnownAssignedXids insertion", xids[i]); continue; } KnownAssignedXidsAdd(xids[i], xids[i], true); } KnownAssignedXidsDisplay(trace_recovery(DEBUG3)); } pfree(xids); /* * latestObservedXid is at least set to the point where SUBTRANS was * started up to (cf. ProcArrayInitRecovery()) or to the biggest xid * RecordKnownAssignedTransactionIds() was called for. Initialize * subtrans from thereon, up to nextXid - 1. * * We need to duplicate parts of RecordKnownAssignedTransactionId() here, * because we've just added xids to the known assigned xids machinery that * haven't gone through RecordKnownAssignedTransactionId(). */ Assert(TransactionIdIsNormal(latestObservedXid)); TransactionIdAdvance(latestObservedXid); while (TransactionIdPrecedes(latestObservedXid, running->nextXid)) { ExtendSUBTRANS(latestObservedXid); TransactionIdAdvance(latestObservedXid); } TransactionIdRetreat(latestObservedXid); /* = running->nextXid - 1 */ /* ---------- * Now we've got the running xids we need to set the global values that * are used to track snapshots as they evolve further. * * - latestCompletedXid which will be the xmax for snapshots * - lastOverflowedXid which shows whether snapshots overflow * - nextXid * * If the snapshot overflowed, then we still initialise with what we know, * but the recovery snapshot isn't fully valid yet because we know there * are some subxids missing. We don't know the specific subxids that are * missing, so conservatively assume the last one is latestObservedXid. * ---------- */ if (running->subxid_overflow) { standbyState = STANDBY_SNAPSHOT_PENDING; standbySnapshotPendingXmin = latestObservedXid; procArray->lastOverflowedXid = latestObservedXid; } else { standbyState = STANDBY_SNAPSHOT_READY; standbySnapshotPendingXmin = InvalidTransactionId; } /* * If a transaction wrote a commit record in the gap between taking and * logging the snapshot then latestCompletedXid may already be higher than * the value from the snapshot, so check before we use the incoming value. * It also might not yet be set at all. */ MaintainLatestCompletedXidRecovery(running->latestCompletedXid); /* * NB: No need to increment ShmemVariableCache->xactCompletionCount here, * nobody can see it yet. 
*/ LWLockRelease(ProcArrayLock); /* ShmemVariableCache->nextXid must be beyond any observed xid. */ AdvanceNextFullTransactionIdPastXid(latestObservedXid); Assert(FullTransactionIdIsValid(ShmemVariableCache->nextXid)); KnownAssignedXidsDisplay(trace_recovery(DEBUG3)); if (standbyState == STANDBY_SNAPSHOT_READY) elog(trace_recovery(DEBUG1), "recovery snapshots are now enabled"); else elog(trace_recovery(DEBUG1), "recovery snapshot waiting for non-overflowed snapshot or " "until oldest active xid on standby is at least %u (now %u)", standbySnapshotPendingXmin, running->oldestRunningXid); } /* * ProcArrayApplyXidAssignment * Process an XLOG_XACT_ASSIGNMENT WAL record */ void ProcArrayApplyXidAssignment(TransactionId topxid, int nsubxids, TransactionId *subxids) { TransactionId max_xid; int i; Assert(standbyState >= STANDBY_INITIALIZED); max_xid = TransactionIdLatest(topxid, nsubxids, subxids); /* * Mark all the subtransactions as observed. * * NOTE: This will fail if the subxid contains too many previously * unobserved xids to fit into known-assigned-xids. That shouldn't happen * as the code stands, because xid-assignment records should never contain * more than PGPROC_MAX_CACHED_SUBXIDS entries. */ RecordKnownAssignedTransactionIds(max_xid); /* * Notice that we update pg_subtrans with the top-level xid, rather than * the parent xid. This is a difference between normal processing and * recovery, yet is still correct in all cases. The reason is that * subtransaction commit is not marked in clog until commit processing, so * all aborted subtransactions have already been clearly marked in clog. * As a result we are able to refer directly to the top-level * transaction's state rather than skipping through all the intermediate * states in the subtransaction tree. This should be the first time we * have attempted to SubTransSetParent(). */ for (i = 0; i < nsubxids; i++) SubTransSetParent(subxids[i], topxid); /* KnownAssignedXids isn't maintained yet, so we're done for now */ if (standbyState == STANDBY_INITIALIZED) return; /* * Uses same locking as transaction commit */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); /* * Remove subxids from known-assigned-xacts. */ KnownAssignedXidsRemoveTree(InvalidTransactionId, nsubxids, subxids); /* * Advance lastOverflowedXid to be at least the last of these subxids. */ if (TransactionIdPrecedes(procArray->lastOverflowedXid, max_xid)) procArray->lastOverflowedXid = max_xid; LWLockRelease(ProcArrayLock); } /* * TransactionIdIsInProgress -- is given transaction running in some backend * * Aside from some shortcuts such as checking RecentXmin and our own Xid, * there are four possibilities for finding a running transaction: * * 1. The given Xid is a main transaction Id. We will find this out cheaply * by looking at ProcGlobal->xids. * * 2. The given Xid is one of the cached subxact Xids in the PGPROC array. * We can find this out cheaply too. * * 3. In Hot Standby mode, we must search the KnownAssignedXids list to see * if the Xid is running on the primary. * * 4. Search the SubTrans tree to find the Xid's topmost parent, and then see * if that is running according to ProcGlobal->xids[] or KnownAssignedXids. * This is the slowest way, but sadly it has to be done always if the others * failed, unless we see that the cached subxact sets are complete (none have * overflowed). * * ProcArrayLock has to be held while we do 1, 2, 3. If we save the top Xids * while doing 1 and 3, we can release the ProcArrayLock while we do 4. 
* This buys back some concurrency (and we can't retrieve the main Xids from * ProcGlobal->xids[] again anyway; see GetNewTransactionId). */ bool TransactionIdIsInProgress(TransactionId xid) { static TransactionId *xids = NULL; static TransactionId *other_xids; XidCacheStatus *other_subxidstates; int nxids = 0; ProcArrayStruct *arrayP = procArray; TransactionId topxid; TransactionId latestCompletedXid; int mypgxactoff; int numProcs; int j; /* * Don't bother checking a transaction older than RecentXmin; it could not * possibly still be running. (Note: in particular, this guarantees that * we reject InvalidTransactionId, FrozenTransactionId, etc as not * running.) */ if (TransactionIdPrecedes(xid, RecentXmin)) { xc_by_recent_xmin_inc(); return false; } /* * We may have just checked the status of this transaction, so if it is * already known to be completed, we can fall out without any access to * shared memory. */ if (TransactionIdIsKnownCompleted(xid)) { xc_by_known_xact_inc(); return false; } /* * Also, we can handle our own transaction (and subtransactions) without * any access to shared memory. */ if (TransactionIdIsCurrentTransactionId(xid)) { xc_by_my_xact_inc(); return true; } /* * If first time through, get workspace to remember main XIDs in. We * malloc it permanently to avoid repeated palloc/pfree overhead. */ if (xids == NULL) { /* * In hot standby mode, reserve enough space to hold all xids in the * known-assigned list. If we later finish recovery, we no longer need * the bigger array, but we don't bother to shrink it. */ int maxxids = RecoveryInProgress() ? TOTAL_MAX_CACHED_SUBXIDS : arrayP->maxProcs; xids = (TransactionId *) malloc(maxxids * sizeof(TransactionId)); if (xids == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } other_xids = ProcGlobal->xids; other_subxidstates = ProcGlobal->subxidStates; LWLockAcquire(ProcArrayLock, LW_SHARED); /* * Now that we have the lock, we can check latestCompletedXid; if the * target Xid is after that, it's surely still running. */ latestCompletedXid = XidFromFullTransactionId(ShmemVariableCache->latestCompletedXid); if (TransactionIdPrecedes(latestCompletedXid, xid)) { LWLockRelease(ProcArrayLock); xc_by_latest_xid_inc(); return true; } /* No shortcuts, gotta grovel through the array */ mypgxactoff = MyProc->pgxactoff; numProcs = arrayP->numProcs; for (int pgxactoff = 0; pgxactoff < numProcs; pgxactoff++) { int pgprocno; PGPROC *proc; TransactionId pxid; int pxids; /* Ignore ourselves --- dealt with it above */ if (pgxactoff == mypgxactoff) continue; /* Fetch xid just once - see GetNewTransactionId */ pxid = UINT32_ACCESS_ONCE(other_xids[pgxactoff]); if (!TransactionIdIsValid(pxid)) continue; /* * Step 1: check the main Xid */ if (TransactionIdEquals(pxid, xid)) { LWLockRelease(ProcArrayLock); xc_by_main_xid_inc(); return true; } /* * We can ignore main Xids that are younger than the target Xid, since * the target could not possibly be their child. 
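 * (A subtransaction's XID is always assigned after its parent's, so a
 * parent XID can never be younger than its children.)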
*/ if (TransactionIdPrecedes(xid, pxid)) continue; /* * Step 2: check the cached child-Xids arrays */ pxids = other_subxidstates[pgxactoff].count; pg_read_barrier(); /* pairs with barrier in GetNewTransactionId() */ pgprocno = arrayP->pgprocnos[pgxactoff]; proc = &allProcs[pgprocno]; for (j = pxids - 1; j >= 0; j--) { /* Fetch xid just once - see GetNewTransactionId */ TransactionId cxid = UINT32_ACCESS_ONCE(proc->subxids.xids[j]); if (TransactionIdEquals(cxid, xid)) { LWLockRelease(ProcArrayLock); xc_by_child_xid_inc(); return true; } } /* * Save the main Xid for step 4. We only need to remember main Xids * that have uncached children. (Note: there is no race condition * here because the overflowed flag cannot be cleared, only set, while * we hold ProcArrayLock. So we can't miss an Xid that we need to * worry about.) */ if (other_subxidstates[pgxactoff].overflowed) xids[nxids++] = pxid; } /* * Step 3: in hot standby mode, check the known-assigned-xids list. XIDs * in the list must be treated as running. */ if (RecoveryInProgress()) { /* none of the PGPROC entries should have XIDs in hot standby mode */ Assert(nxids == 0); if (KnownAssignedXidExists(xid)) { LWLockRelease(ProcArrayLock); xc_by_known_assigned_inc(); return true; } /* * If the KnownAssignedXids overflowed, we have to check pg_subtrans * too. Fetch all xids from KnownAssignedXids that are lower than * xid, since if xid is a subtransaction its parent will always have a * lower value. Note we will collect both main and subXIDs here, but * there's no help for it. */ if (TransactionIdPrecedesOrEquals(xid, procArray->lastOverflowedXid)) nxids = KnownAssignedXidsGet(xids, xid); } LWLockRelease(ProcArrayLock); /* * If none of the relevant caches overflowed, we know the Xid is not * running without even looking at pg_subtrans. */ if (nxids == 0) { xc_no_overflow_inc(); return false; } /* * Step 4: have to check pg_subtrans. * * At this point, we know it's either a subtransaction of one of the Xids * in xids[], or it's not running. If it's an already-failed * subtransaction, we want to say "not running" even though its parent may * still be running. So first, check pg_xact to see if it's been aborted. */ xc_slow_answer_inc(); if (TransactionIdDidAbort(xid)) return false; /* * It isn't aborted, so check whether the transaction tree it belongs to * is still running (or, more precisely, whether it was running when we * held ProcArrayLock). */ topxid = SubTransGetTopmostTransaction(xid); Assert(TransactionIdIsValid(topxid)); if (!TransactionIdEquals(topxid, xid)) { for (int i = 0; i < nxids; i++) { if (TransactionIdEquals(xids[i], topxid)) return true; } } return false; } /* * TransactionIdIsActive -- is xid the top-level XID of an active backend? * * This differs from TransactionIdIsInProgress in that it ignores prepared * transactions, as well as transactions running on the primary if we're in * hot standby. Also, we ignore subtransactions since that's not needed * for current uses. */ bool TransactionIdIsActive(TransactionId xid) { bool result = false; ProcArrayStruct *arrayP = procArray; TransactionId *other_xids = ProcGlobal->xids; int i; /* * Don't bother checking a transaction older than RecentXmin; it could not * possibly still be running. 
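 * (RecentXmin is this backend's xmin from its most recently taken
 * snapshot.)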
*/ if (TransactionIdPrecedes(xid, RecentXmin)) return false; LWLockAcquire(ProcArrayLock, LW_SHARED); for (i = 0; i < arrayP->numProcs; i++) { int pgprocno = arrayP->pgprocnos[i]; PGPROC *proc = &allProcs[pgprocno]; TransactionId pxid; /* Fetch xid just once - see GetNewTransactionId */ pxid = UINT32_ACCESS_ONCE(other_xids[i]); if (!TransactionIdIsValid(pxid)) continue; if (proc->pid == 0) continue; /* ignore prepared transactions */ if (TransactionIdEquals(pxid, xid)) { result = true; break; } } LWLockRelease(ProcArrayLock); return result; } /* * Determine XID horizons. * * This is used by wrapper functions like GetOldestNonRemovableTransactionId() * (for VACUUM), GetReplicationHorizons() (for hot_standby_feedback), etc as * well as "internally" by GlobalVisUpdate() (see comment above struct * GlobalVisState). * * See the definition of ComputeXidHorizonsResult for the various computed * horizons. * * For VACUUM separate horizons (used to decide which deleted tuples must * be preserved), for shared and non-shared tables are computed. For shared * relations backends in all databases must be considered, but for non-shared * relations that's not required, since only backends in my own database could * ever see the tuples in them. Also, we can ignore concurrently running lazy * VACUUMs because (a) they must be working on other tables, and (b) they * don't need to do snapshot-based lookups. Similarly, for the non-catalog * horizon, we can ignore CREATE INDEX CONCURRENTLY and REINDEX CONCURRENTLY * when they are working on non-partial, non-expressional indexes, for the * same reasons and because they can't run in transaction blocks. (They are * not possible to ignore for catalogs, because CIC and RC do some catalog * operations.) Do note that this means that CIC and RC must use a lock level * that conflicts with VACUUM. * * This also computes a horizon used to truncate pg_subtrans. For that * backends in all databases have to be considered, and concurrently running * lazy VACUUMs cannot be ignored, as they still may perform pg_subtrans * accesses. * * Note: we include all currently running xids in the set of considered xids. * This ensures that if a just-started xact has not yet set its snapshot, * when it does set the snapshot it cannot set xmin less than what we compute. * See notes in src/backend/access/transam/README. * * Note: despite the above, it's possible for the calculated values to move * backwards on repeated calls. The calculated values are conservative, so * that anything older is definitely not considered as running by anyone * anymore, but the exact values calculated depend on a number of things. For * example, if there are no transactions running in the current database, the * horizon for normal tables will be latestCompletedXid. If a transaction * begins after that, its xmin will include in-progress transactions in other * databases that started earlier, so another call will return a lower value. * Nonetheless it is safe to vacuum a table in the current database with the * first result. There are also replication-related effects: a walsender * process can set its xmin based on transactions that are no longer running * on the primary but are still being replayed on the standby, thus possibly * making the values go backwards. 
In this case there is a possibility that * we lose data that the standby would like to have, but unless the standby * uses a replication slot to make its xmin persistent there is little we can * do about that --- data is only protected if the walsender runs continuously * while queries are executed on the standby. (The Hot Standby code deals * with such cases by failing standby queries that needed to access * already-removed data, so there's no integrity bug.) The computed values * are also adjusted with vacuum_defer_cleanup_age, so increasing that setting * on the fly is another easy way to make horizons move backwards, with no * consequences for data integrity. * * Note: the approximate horizons (see definition of GlobalVisState) are * updated by the computations done here. That's currently required for * correctness and a small optimization. Without doing so it's possible that * heap vacuum's call to heap_page_prune() uses a more conservative horizon * than later when deciding which tuples can be removed - which the code * doesn't expect (breaking HOT). */ static void ComputeXidHorizons(ComputeXidHorizonsResult *h) { ProcArrayStruct *arrayP = procArray; TransactionId kaxmin; bool in_recovery = RecoveryInProgress(); TransactionId *other_xids = ProcGlobal->xids; LWLockAcquire(ProcArrayLock, LW_SHARED); h->latest_completed = ShmemVariableCache->latestCompletedXid; /* * We initialize the MIN() calculation with latestCompletedXid + 1. This * is a lower bound for the XIDs that might appear in the ProcArray later, * and so protects us against overestimating the result due to future * additions. */ { TransactionId initial; initial = XidFromFullTransactionId(h->latest_completed); Assert(TransactionIdIsValid(initial)); TransactionIdAdvance(initial); h->oldest_considered_running = initial; h->shared_oldest_nonremovable = initial; h->catalog_oldest_nonremovable = initial; h->data_oldest_nonremovable = initial; /* * Only modifications made by this backend affect the horizon for * temporary relations. Instead of a check in each iteration of the * loop over all PGPROCs it is cheaper to just initialize to the * current top-level xid any. * * Without an assigned xid we could use a horizon as aggressive as * ReadNewTransactionid(), but we can get away with the much cheaper * latestCompletedXid + 1: If this backend has no xid there, by * definition, can't be any newer changes in the temp table than * latestCompletedXid. */ if (TransactionIdIsValid(MyProc->xid)) h->temp_oldest_nonremovable = MyProc->xid; else h->temp_oldest_nonremovable = initial; } /* * Fetch slot horizons while ProcArrayLock is held - the * LWLockAcquire/LWLockRelease are a barrier, ensuring this happens inside * the lock. */ h->slot_xmin = procArray->replication_slot_xmin; h->slot_catalog_xmin = procArray->replication_slot_catalog_xmin; for (int index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; int8 statusFlags = ProcGlobal->statusFlags[index]; TransactionId xid; TransactionId xmin; /* Fetch xid just once - see GetNewTransactionId */ xid = UINT32_ACCESS_ONCE(other_xids[index]); xmin = UINT32_ACCESS_ONCE(proc->xmin); /* * Consider both the transaction's Xmin, and its Xid. * * We must check both because a transaction might have an Xmin but not * (yet) an Xid; conversely, if it has an Xid, that could determine * some not-yet-set Xmin. 
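 * (TransactionIdOlder() returns the older of the two values, treating an
 * invalid XID as "no value".)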
*/ xmin = TransactionIdOlder(xmin, xid); /* if neither is set, this proc doesn't influence the horizon */ if (!TransactionIdIsValid(xmin)) continue; /* * Don't ignore any procs when determining which transactions might be * considered running. While slots should ensure logical decoding * backends are protected even without this check, it can't hurt to * include them here as well. */ h->oldest_considered_running = TransactionIdOlder(h->oldest_considered_running, xmin); /* * Skip over backends either vacuuming (which is ok with rows being * removed, as long as pg_subtrans is not truncated) or doing logical * decoding (which manages xmin separately, check below). */ if (statusFlags & (PROC_IN_VACUUM | PROC_IN_LOGICAL_DECODING)) continue; /* shared tables need to take backends in all databases into account */ h->shared_oldest_nonremovable = TransactionIdOlder(h->shared_oldest_nonremovable, xmin); /* * Normally queries in other databases are ignored for anything but * the shared horizon. But in recovery we cannot compute an accurate * per-database horizon as all xids are managed via the * KnownAssignedXids machinery. * * Be careful to compute a pessimistic value when MyDatabaseId is not * set. If this is a backend in the process of starting up, we may not * use a "too aggressive" horizon (otherwise we could end up using it * to prune still needed data away). If the current backend never * connects to a database, that is harmless, because * data_oldest_nonremovable will never be utilized. */ if (in_recovery || MyDatabaseId == InvalidOid || proc->databaseId == MyDatabaseId || proc->databaseId == 0) /* always include WalSender */ { /* * We can ignore this backend if it's running CREATE INDEX * CONCURRENTLY or REINDEX CONCURRENTLY on a "safe" index -- but * only on vacuums of user-defined tables. */ if (!(statusFlags & PROC_IN_SAFE_IC)) h->data_oldest_nonremovable = TransactionIdOlder(h->data_oldest_nonremovable, xmin); /* Catalog tables need to consider all backends in this db */ h->catalog_oldest_nonremovable = TransactionIdOlder(h->catalog_oldest_nonremovable, xmin); } } /* catalog horizon should never be later than data */ Assert(TransactionIdPrecedesOrEquals(h->catalog_oldest_nonremovable, h->data_oldest_nonremovable)); /* * If in recovery, fetch the oldest xid in KnownAssignedXids; it will be * applied after the lock is released. */ if (in_recovery) kaxmin = KnownAssignedXidsGetOldestXmin(); /* * No other information from shared state is needed, release the lock * immediately. The rest of the computations can be done without a lock. */ LWLockRelease(ProcArrayLock); if (in_recovery) { h->oldest_considered_running = TransactionIdOlder(h->oldest_considered_running, kaxmin); h->shared_oldest_nonremovable = TransactionIdOlder(h->shared_oldest_nonremovable, kaxmin); h->data_oldest_nonremovable = TransactionIdOlder(h->data_oldest_nonremovable, kaxmin); h->catalog_oldest_nonremovable = TransactionIdOlder(h->catalog_oldest_nonremovable, kaxmin); /* temp relations cannot be accessed in recovery */ } else { /* * Compute the cutoff XID by subtracting vacuum_defer_cleanup_age. * * vacuum_defer_cleanup_age provides some additional "slop" for the * benefit of hot standby queries on standby servers. This is quick * and dirty, and perhaps not all that useful unless the primary has a * predictable transaction rate, but it offers some protection when * there's no walsender connection.
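* * (A concrete illustration, assuming no wraparound clamping is needed: with * vacuum_defer_cleanup_age = 1000 and a computed horizon of xid 50000, the * TransactionIdRetreatedBy() calls below yield an effective horizon of 49000, * i.e. roughly the last 1000 transactions' worth of dead tuples additionally * survive vacuum for the benefit of standby queries.)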
Note that we are assuming * vacuum_defer_cleanup_age isn't large enough to cause wraparound --- * so guc.c should limit it to no more than the xidStopLimit threshold * in varsup.c. Also note that we intentionally don't apply * vacuum_defer_cleanup_age on standby servers. */ h->oldest_considered_running = TransactionIdRetreatedBy(h->oldest_considered_running, vacuum_defer_cleanup_age); h->shared_oldest_nonremovable = TransactionIdRetreatedBy(h->shared_oldest_nonremovable, vacuum_defer_cleanup_age); h->data_oldest_nonremovable = TransactionIdRetreatedBy(h->data_oldest_nonremovable, vacuum_defer_cleanup_age); h->catalog_oldest_nonremovable = TransactionIdRetreatedBy(h->catalog_oldest_nonremovable, vacuum_defer_cleanup_age); /* defer doesn't apply to temp relations */ } /* * Check whether there are replication slots requiring an older xmin. */ h->shared_oldest_nonremovable = TransactionIdOlder(h->shared_oldest_nonremovable, h->slot_xmin); h->data_oldest_nonremovable = TransactionIdOlder(h->data_oldest_nonremovable, h->slot_xmin); /* * The only difference between catalog / data horizons is that the slot's * catalog xmin is applied to the catalog one (so catalogs can be accessed * for logical decoding). Initialize with data horizon, and then back up * further if necessary. Have to back up the shared horizon as well, since * that also can contain catalogs. */ h->shared_oldest_nonremovable_raw = h->shared_oldest_nonremovable; h->shared_oldest_nonremovable = TransactionIdOlder(h->shared_oldest_nonremovable, h->slot_catalog_xmin); h->catalog_oldest_nonremovable = TransactionIdOlder(h->catalog_oldest_nonremovable, h->slot_xmin); h->catalog_oldest_nonremovable = TransactionIdOlder(h->catalog_oldest_nonremovable, h->slot_catalog_xmin); /* * It's possible that slots / vacuum_defer_cleanup_age backed up the * horizons further than oldest_considered_running. Fix. */ h->oldest_considered_running = TransactionIdOlder(h->oldest_considered_running, h->shared_oldest_nonremovable); h->oldest_considered_running = TransactionIdOlder(h->oldest_considered_running, h->catalog_oldest_nonremovable); h->oldest_considered_running = TransactionIdOlder(h->oldest_considered_running, h->data_oldest_nonremovable); /* * shared horizons have to be at least as old as the oldest visible in * current db */ Assert(TransactionIdPrecedesOrEquals(h->shared_oldest_nonremovable, h->data_oldest_nonremovable)); Assert(TransactionIdPrecedesOrEquals(h->shared_oldest_nonremovable, h->catalog_oldest_nonremovable)); /* * Horizons need to ensure that pg_subtrans access is still possible for * the relevant backends. */ Assert(TransactionIdPrecedesOrEquals(h->oldest_considered_running, h->shared_oldest_nonremovable)); Assert(TransactionIdPrecedesOrEquals(h->oldest_considered_running, h->catalog_oldest_nonremovable)); Assert(TransactionIdPrecedesOrEquals(h->oldest_considered_running, h->data_oldest_nonremovable)); Assert(TransactionIdPrecedesOrEquals(h->oldest_considered_running, h->temp_oldest_nonremovable)); Assert(!TransactionIdIsValid(h->slot_xmin) || TransactionIdPrecedesOrEquals(h->oldest_considered_running, h->slot_xmin)); Assert(!TransactionIdIsValid(h->slot_catalog_xmin) || TransactionIdPrecedesOrEquals(h->oldest_considered_running, h->slot_catalog_xmin)); /* update approximate horizons with the computed horizons */ GlobalVisUpdateApply(h); } /* * Determine what kind of visibility horizon needs to be used for a * relation. If rel is NULL, the most conservative horizon is used. 
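* * For example (a sketch of the mapping implemented below, outside recovery; * pg_class_rel, user_table and my_temp_table being hypothetical Relation pointers): * GlobalVisHorizonKindForRel(NULL) -> VISHORIZON_SHARED; * GlobalVisHorizonKindForRel(pg_class_rel) -> VISHORIZON_CATALOG; * GlobalVisHorizonKindForRel(user_table) -> VISHORIZON_DATA; * GlobalVisHorizonKindForRel(my_temp_table) -> VISHORIZON_TEMP.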
*/ static inline GlobalVisHorizonKind GlobalVisHorizonKindForRel(Relation rel) { /* * Other relkinds currently don't contain xids, nor always the necessary * logical decoding markers. */ Assert(!rel || rel->rd_rel->relkind == RELKIND_RELATION || rel->rd_rel->relkind == RELKIND_MATVIEW || rel->rd_rel->relkind == RELKIND_TOASTVALUE); if (rel == NULL || rel->rd_rel->relisshared || RecoveryInProgress()) return VISHORIZON_SHARED; else if (IsCatalogRelation(rel) || RelationIsAccessibleInLogicalDecoding(rel)) return VISHORIZON_CATALOG; else if (!RELATION_IS_LOCAL(rel)) return VISHORIZON_DATA; else return VISHORIZON_TEMP; } /* * Return the oldest XID for which deleted tuples must be preserved in the * passed table. * * If rel is not NULL the horizon may be considerably more recent than * otherwise (i.e. fewer tuples will be removable). In the NULL case a horizon * that is correct (but not optimal) for all relations will be returned. * * This is used by VACUUM to decide which deleted tuples must be preserved in * the passed-in table. */ TransactionId GetOldestNonRemovableTransactionId(Relation rel) { ComputeXidHorizonsResult horizons; ComputeXidHorizons(&horizons); switch (GlobalVisHorizonKindForRel(rel)) { case VISHORIZON_SHARED: return horizons.shared_oldest_nonremovable; case VISHORIZON_CATALOG: return horizons.catalog_oldest_nonremovable; case VISHORIZON_DATA: return horizons.data_oldest_nonremovable; case VISHORIZON_TEMP: return horizons.temp_oldest_nonremovable; } return InvalidTransactionId; } /* * Return the oldest transaction id any currently running backend might still * consider running. This should not be used for visibility / pruning * determinations (see GetOldestNonRemovableTransactionId()), but for * decisions like up to where pg_subtrans can be truncated. */ TransactionId GetOldestTransactionIdConsideredRunning(void) { ComputeXidHorizonsResult horizons; ComputeXidHorizons(&horizons); return horizons.oldest_considered_running; } /* * Return the visibility horizons for a hot standby feedback message. */ void GetReplicationHorizons(TransactionId *xmin, TransactionId *catalog_xmin) { ComputeXidHorizonsResult horizons; ComputeXidHorizons(&horizons); /* * Don't want to use shared_oldest_nonremovable here, as that contains the * effect of replication slot's catalog_xmin. We want to send a separate * feedback for the catalog horizon, so the primary can remove data table * contents more aggressively. */ *xmin = horizons.shared_oldest_nonremovable_raw; *catalog_xmin = horizons.slot_catalog_xmin; } /* * GetMaxSnapshotXidCount -- get max size for snapshot XID array * * We have to export this for use by snapmgr.c. */ int GetMaxSnapshotXidCount(void) { return procArray->maxProcs; } /* * GetMaxSnapshotSubxidCount -- get max size for snapshot sub-XID array * * We have to export this for use by snapmgr.c. */ int GetMaxSnapshotSubxidCount(void) { return TOTAL_MAX_CACHED_SUBXIDS; } /* * Initialize old_snapshot_threshold specific parts of a newly built snapshot. */ static void GetSnapshotDataInitOldSnapshot(Snapshot snapshot) { if (!OldSnapshotThresholdActive()) { /* * If not using "snapshot too old" feature, fill related fields with * dummy values that don't require any locking. */ snapshot->lsn = InvalidXLogRecPtr; snapshot->whenTaken = 0; } else { /* * Capture the current time and WAL stream location in case this * snapshot becomes old enough to need to fall back on the special * "old snapshot" logic.
*/ snapshot->lsn = GetXLogInsertRecPtr(); snapshot->whenTaken = GetSnapshotCurrentTimestamp(); MaintainOldSnapshotTimeMapping(snapshot->whenTaken, snapshot->xmin); } } /* * Helper function for GetSnapshotData() that checks if the bulk of the * visibility information in the snapshot is still valid. If so, it updates * the fields that need to change and returns true. Otherwise it returns * false. * * This very likely can be evolved to not need ProcArrayLock held (at the very * least in the case we already hold a snapshot), but that's for another day. */ static bool GetSnapshotDataReuse(Snapshot snapshot) { uint64 curXactCompletionCount; Assert(LWLockHeldByMe(ProcArrayLock)); if (unlikely(snapshot->snapXactCompletionCount == 0)) return false; curXactCompletionCount = ShmemVariableCache->xactCompletionCount; if (curXactCompletionCount != snapshot->snapXactCompletionCount) return false; /* * If the current xactCompletionCount is still the same as it was at the * time the snapshot was built, we can be sure that rebuilding the * contents of the snapshot the hard way would result in the same snapshot * contents: * * As explained in transam/README, the set of xids considered running by * GetSnapshotData() cannot change while ProcArrayLock is held. Snapshot * contents only depend on transactions with xids and xactCompletionCount * is incremented whenever a transaction with an xid finishes (while * holding ProcArrayLock exclusively). Thus the xactCompletionCount check * ensures we would detect if the snapshot would have changed. * * As the snapshot contents are the same as before, it is safe to * re-enter the snapshot's xmin into the PGPROC array. None of the rows * visible under the snapshot could already have been removed (that'd * require the set of running transactions to change) and it fulfills the * requirement that concurrent GetSnapshotData() calls yield the same * xmin. */ if (!TransactionIdIsValid(MyProc->xmin)) MyProc->xmin = TransactionXmin = snapshot->xmin; RecentXmin = snapshot->xmin; Assert(TransactionIdPrecedesOrEquals(TransactionXmin, RecentXmin)); snapshot->curcid = GetCurrentCommandId(false); snapshot->active_count = 0; snapshot->regd_count = 0; snapshot->copied = false; GetSnapshotDataInitOldSnapshot(snapshot); return true; } /* * GetSnapshotData -- returns information about running transactions. * * The returned snapshot includes xmin (lowest still-running xact ID), * xmax (highest completed xact ID + 1), and a list of running xact IDs * in the range xmin <= xid < xmax. It is used as follows: * All xact IDs < xmin are considered finished. * All xact IDs >= xmax are considered still running. * For an xact ID xmin <= xid < xmax, consult list to see whether * it is considered running or not. * This ensures that the set of transactions seen as "running" by the * current xact will not change after it takes the snapshot. * * All running top-level XIDs are included in the snapshot, except for lazy * VACUUM processes. We also try to include running subtransaction XIDs, * but since PGPROC has only a limited cache area for subxact XIDs, full * information may not be available. If we find any overflowed subxid arrays, * we have to mark the snapshot's subxid data as overflowed, and extra work * *may* need to be done to determine what's running (see XidInMVCCSnapshot() * in heapam_visibility.c). * * We also update the following backend-global variables: * TransactionXmin: the oldest xmin of any snapshot in use in the * current transaction (this is the same as MyProc->xmin).
* RecentXmin: the xmin computed for the most recent snapshot. XIDs * older than this are known not running any more. * * And try to advance the bounds of GlobalVis{Shared,Catalog,Data,Temp}Rels * for the benefit of the GlobalVisTest* family of functions. * * Note: this function should probably not be called with an argument that's * not statically allocated (see xip allocation below). */ Snapshot GetSnapshotData(Snapshot snapshot) { ProcArrayStruct *arrayP = procArray; TransactionId *other_xids = ProcGlobal->xids; TransactionId xmin; TransactionId xmax; int count = 0; int subcount = 0; bool suboverflowed = false; FullTransactionId latest_completed; TransactionId oldestxid; int mypgxactoff; TransactionId myxid; uint64 curXactCompletionCount; TransactionId replication_slot_xmin = InvalidTransactionId; TransactionId replication_slot_catalog_xmin = InvalidTransactionId; Assert(snapshot != NULL); /* * Allocating space for maxProcs xids is usually overkill; numProcs would * be sufficient. But it seems better to do the malloc while not holding * the lock, so we can't look at numProcs. Likewise, we allocate much * more subxip storage than is probably needed. * * This does open a possibility for avoiding repeated malloc/free: since * maxProcs does not change at runtime, we can simply reuse the previous * xip arrays if any. (This relies on the fact that all callers pass * static SnapshotData structs.) */ if (snapshot->xip == NULL) { /* * First call for this snapshot. Snapshot is same size whether or not * we are in recovery, see later comments. */ snapshot->xip = (TransactionId *) malloc(GetMaxSnapshotXidCount() * sizeof(TransactionId)); if (snapshot->xip == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); Assert(snapshot->subxip == NULL); snapshot->subxip = (TransactionId *) malloc(GetMaxSnapshotSubxidCount() * sizeof(TransactionId)); if (snapshot->subxip == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } /* * It is sufficient to get shared lock on ProcArrayLock, even if we are * going to set MyProc->xmin. */ LWLockAcquire(ProcArrayLock, LW_SHARED); if (GetSnapshotDataReuse(snapshot)) { if (snapshot_hook) snapshot_hook(snapshot); LWLockRelease(ProcArrayLock); return snapshot; } latest_completed = ShmemVariableCache->latestCompletedXid; mypgxactoff = MyProc->pgxactoff; myxid = other_xids[mypgxactoff]; Assert(myxid == MyProc->xid); oldestxid = ShmemVariableCache->oldestXid; curXactCompletionCount = ShmemVariableCache->xactCompletionCount; /* xmax is always latestCompletedXid + 1 */ xmax = XidFromFullTransactionId(latest_completed); TransactionIdAdvance(xmax); Assert(TransactionIdIsNormal(xmax)); /* initialize xmin calculation with xmax */ xmin = xmax; /* take own xid into account, saves a check inside the loop */ if (TransactionIdIsNormal(myxid) && NormalTransactionIdPrecedes(myxid, xmin)) xmin = myxid; snapshot->takenDuringRecovery = RecoveryInProgress(); if (!snapshot->takenDuringRecovery) { int numProcs = arrayP->numProcs; TransactionId *xip = snapshot->xip; int *pgprocnos = arrayP->pgprocnos; XidCacheStatus *subxidStates = ProcGlobal->subxidStates; uint8 *allStatusFlags = ProcGlobal->statusFlags; /* * First collect set of pgxactoff/xids that need to be included in the * snapshot. 
*/ for (int pgxactoff = 0; pgxactoff < numProcs; pgxactoff++) { /* Fetch xid just once - see GetNewTransactionId */ TransactionId xid = UINT32_ACCESS_ONCE(other_xids[pgxactoff]); uint8 statusFlags; Assert(allProcs[arrayP->pgprocnos[pgxactoff]].pgxactoff == pgxactoff); /* * If the transaction has no XID assigned, we can skip it; it * won't have sub-XIDs either. */ if (likely(xid == InvalidTransactionId)) continue; /* * We don't include our own XIDs (if any) in the snapshot. It * needs to be included in the xmin computation, but we did so * outside the loop. */ if (pgxactoff == mypgxactoff) continue; /* * The only way we are able to get here with a non-normal xid is * during bootstrap - with this backend using * BootstrapTransactionId. But the above test should filter that * out. */ Assert(TransactionIdIsNormal(xid)); /* * If the XID is >= xmax, we can skip it; such transactions will * be treated as running anyway (and any sub-XIDs will also be >= * xmax). */ if (!NormalTransactionIdPrecedes(xid, xmax)) continue; /* * Skip over backends doing logical decoding (which manages xmin * separately, check below) and ones running LAZY VACUUM. */ statusFlags = allStatusFlags[pgxactoff]; if (statusFlags & (PROC_IN_LOGICAL_DECODING | PROC_IN_VACUUM)) continue; if (NormalTransactionIdPrecedes(xid, xmin)) xmin = xid; /* Add XID to snapshot. */ xip[count++] = xid; /* * Save subtransaction XIDs if possible (if we've already * overflowed, there's no point). Note that the subxact XIDs must * be later than their parent, so no need to check them against * xmin. We could filter against xmax, but it seems better not to * do that much work while holding the ProcArrayLock. * * The other backend can add more subxids concurrently, but cannot * remove any. Hence it's important to fetch nxids just once. * Should be safe to use memcpy, though. (We needn't worry about * missing any xids added concurrently, because they must postdate * xmax.) * * Again, our own XIDs are not included in the snapshot. */ if (!suboverflowed) { if (subxidStates[pgxactoff].overflowed) suboverflowed = true; else { int nsubxids = subxidStates[pgxactoff].count; if (nsubxids > 0) { int pgprocno = pgprocnos[pgxactoff]; PGPROC *proc = &allProcs[pgprocno]; pg_read_barrier(); /* pairs with GetNewTransactionId */ memcpy(snapshot->subxip + subcount, (void *) proc->subxids.xids, nsubxids * sizeof(TransactionId)); subcount += nsubxids; } } } } } else { /* * We're in hot standby, so get XIDs from KnownAssignedXids. * * We store all xids directly into subxip[]. Here's why: * * In recovery we don't know which xids are top-level and which are * subxacts, a design choice that greatly simplifies xid processing. * * It seems like we would want to try to put xids into xip[] only, but * that is fairly small. We would either need to make that bigger or * to increase the rate at which we WAL-log xid assignment; neither is * an appealing choice. * * We could try to store xids into xip[] first and then into subxip[] * if there are too many xids. That only works if the snapshot doesn't * overflow because we do not search subxip[] in that case. A simpler * way is to just store all xids in the subxact array because this is * by far the bigger array. We just leave the xip array empty. * * Either way we need to change the way XidInMVCCSnapshot() works * depending upon when the snapshot was taken, or change normal * snapshot processing so it matches.
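* * (Sketch of the reader side: for snapshots with takenDuringRecovery set, * XidInMVCCSnapshot() leaves xip[] unexamined and searches subxip[] for both * top-level xids and subxids, first mapping the xid to its top-level parent * via pg_subtrans when the array has overflowed.)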
* * Note: It is possible for recovery to end before we finish taking * the snapshot, and for newly assigned transaction ids to be added to * the ProcArray. xmax cannot change while we hold ProcArrayLock, so * those newly added transaction ids would be filtered away, so we * need not be concerned about them. */ subcount = KnownAssignedXidsGetAndSetXmin(snapshot->subxip, &xmin, xmax); if (TransactionIdPrecedesOrEquals(xmin, procArray->lastOverflowedXid)) suboverflowed = true; } /* * Fetch into local variable while ProcArrayLock is held - the * LWLockRelease below is a barrier, ensuring this happens inside the * lock. */ replication_slot_xmin = procArray->replication_slot_xmin; replication_slot_catalog_xmin = procArray->replication_slot_catalog_xmin; if (!TransactionIdIsValid(MyProc->xmin)) MyProc->xmin = TransactionXmin = xmin; if (snapshot_hook) snapshot_hook(snapshot); LWLockRelease(ProcArrayLock); /* maintain state for GlobalVis* */ { TransactionId def_vis_xid; TransactionId def_vis_xid_data; FullTransactionId def_vis_fxid; FullTransactionId def_vis_fxid_data; FullTransactionId oldestfxid; /* * Converting oldestXid is only safe when xid horizon cannot advance, * i.e. holding locks. While we don't hold the lock anymore, all the * necessary data has been gathered with lock held. */ oldestfxid = FullXidRelativeTo(latest_completed, oldestxid); /* apply vacuum_defer_cleanup_age */ def_vis_xid_data = TransactionIdRetreatedBy(xmin, vacuum_defer_cleanup_age); /* Check whether there's a replication slot requiring an older xmin. */ def_vis_xid_data = TransactionIdOlder(def_vis_xid_data, replication_slot_xmin); /* * Rows in non-shared, non-catalog tables possibly could be vacuumed * if older than this xid. */ def_vis_xid = def_vis_xid_data; /* * Check whether there's a replication slot requiring an older catalog * xmin. */ def_vis_xid = TransactionIdOlder(replication_slot_catalog_xmin, def_vis_xid); def_vis_fxid = FullXidRelativeTo(latest_completed, def_vis_xid); def_vis_fxid_data = FullXidRelativeTo(latest_completed, def_vis_xid_data); /* * Check if we can increase upper bound. As a previous * GlobalVisUpdate() might have computed more aggressive values, don't * overwrite them if so. */ GlobalVisSharedRels.definitely_needed = FullTransactionIdNewer(def_vis_fxid, GlobalVisSharedRels.definitely_needed); GlobalVisCatalogRels.definitely_needed = FullTransactionIdNewer(def_vis_fxid, GlobalVisCatalogRels.definitely_needed); GlobalVisDataRels.definitely_needed = FullTransactionIdNewer(def_vis_fxid_data, GlobalVisDataRels.definitely_needed); /* See temp_oldest_nonremovable computation in ComputeXidHorizons() */ if (TransactionIdIsNormal(myxid)) GlobalVisTempRels.definitely_needed = FullXidRelativeTo(latest_completed, myxid); else { GlobalVisTempRels.definitely_needed = latest_completed; FullTransactionIdAdvance(&GlobalVisTempRels.definitely_needed); } /* * Check if we know that we can initialize or increase the lower * bound. Currently the only cheap way to do so is to use * ShmemVariableCache->oldestXid as input. * * We should definitely be able to do better. We could e.g. put a * global lower bound value into ShmemVariableCache. 
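* * (For orientation, a sketch of how the GlobalVisTest* machinery consumes the * two bounds: an fxid preceding maybe_needed is surely removable, one not * preceding definitely_needed is surely still needed, and anything in between * forces a recomputation -- roughly: * if (FullTransactionIdPrecedes(fxid, state->maybe_needed)) removable; * else if (!FullTransactionIdPrecedes(fxid, state->definitely_needed)) still needed; * else recompute via GlobalVisUpdate() and re-check.)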
*/ GlobalVisSharedRels.maybe_needed = FullTransactionIdNewer(GlobalVisSharedRels.maybe_needed, oldestfxid); GlobalVisCatalogRels.maybe_needed = FullTransactionIdNewer(GlobalVisCatalogRels.maybe_needed, oldestfxid); GlobalVisDataRels.maybe_needed = FullTransactionIdNewer(GlobalVisDataRels.maybe_needed, oldestfxid); /* accurate value known */ GlobalVisTempRels.maybe_needed = GlobalVisTempRels.definitely_needed; } RecentXmin = xmin; Assert(TransactionIdPrecedesOrEquals(TransactionXmin, RecentXmin)); snapshot->xmin = xmin; snapshot->xmax = xmax; snapshot->xcnt = count; snapshot->subxcnt = subcount; snapshot->suboverflowed = suboverflowed; snapshot->snapXactCompletionCount = curXactCompletionCount; snapshot->curcid = GetCurrentCommandId(false); /* * This is a new snapshot, so set both refcounts to zero, and mark it as * not copied in persistent memory. */ snapshot->active_count = 0; snapshot->regd_count = 0; snapshot->copied = false; GetSnapshotDataInitOldSnapshot(snapshot); return snapshot; } /* * ProcArrayInstallImportedXmin -- install imported xmin into MyProc->xmin * * This is called when installing a snapshot imported from another * transaction. To ensure that OldestXmin doesn't go backwards, we must * check that the source transaction is still running, and we'd better do * that atomically with installing the new xmin. * * Returns true if successful, false if source xact is no longer running. */ bool ProcArrayInstallImportedXmin(TransactionId xmin, VirtualTransactionId *sourcevxid) { bool result = false; ProcArrayStruct *arrayP = procArray; int index; Assert(TransactionIdIsNormal(xmin)); if (!sourcevxid) return false; /* Get lock so source xact can't end while we're doing this */ LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; int statusFlags = ProcGlobal->statusFlags[index]; TransactionId xid; /* Ignore procs running LAZY VACUUM */ if (statusFlags & PROC_IN_VACUUM) continue; /* We are only interested in the specific virtual transaction. */ if (proc->backendId != sourcevxid->backendId) continue; if (proc->lxid != sourcevxid->localTransactionId) continue; /* * We check the transaction's database ID for paranoia's sake: if it's * in another DB then its xmin does not cover us. Caller should have * detected this already, so we just treat any funny cases as * "transaction not found". */ if (proc->databaseId != MyDatabaseId) continue; /* * Likewise, let's just make real sure its xmin does cover us. */ xid = UINT32_ACCESS_ONCE(proc->xmin); if (!TransactionIdIsNormal(xid) || !TransactionIdPrecedesOrEquals(xid, xmin)) continue; /* * We're good. Install the new xmin. As in GetSnapshotData, set * TransactionXmin too. (Note that because snapmgr.c called * GetSnapshotData first, we'll be overwriting a valid xmin here, so * we don't check that.) */ MyProc->xmin = TransactionXmin = xmin; result = true; break; } LWLockRelease(ProcArrayLock); return result; } /* * ProcArrayInstallRestoredXmin -- install restored xmin into MyProc->xmin * * This is like ProcArrayInstallImportedXmin, but we have a pointer to the * PGPROC of the transaction from which we imported the snapshot, rather than * an XID. * * Note that this function also copies statusFlags from the source `proc` in * order to avoid the case where MyProc's xmin needs to be skipped for * computing xid horizon. * * Returns true if successful, false if source xact is no longer running.
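* * A typical caller (sketch -- e.g. a parallel worker restoring its leader's * serialized snapshot; leader_pgproc being a hypothetical pointer to the source * backend's PGPROC) might do: * if (!ProcArrayInstallRestoredXmin(xmin, leader_pgproc)) * elog(ERROR, "could not restore xmin");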
*/ bool ProcArrayInstallRestoredXmin(TransactionId xmin, PGPROC *proc) { bool result = false; TransactionId xid; Assert(TransactionIdIsNormal(xmin)); Assert(proc != NULL); /* * Get an exclusive lock so that we can copy statusFlags from source proc. */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); /* * Be certain that the referenced PGPROC has an advertised xmin which is * no later than the one we're installing, so that the system-wide xmin * can't go backwards. Also, make sure it's running in the same database, * so that the per-database xmin cannot go backwards. */ xid = UINT32_ACCESS_ONCE(proc->xmin); if (proc->databaseId == MyDatabaseId && TransactionIdIsNormal(xid) && TransactionIdPrecedesOrEquals(xid, xmin)) { /* Install xmin */ MyProc->xmin = TransactionXmin = xmin; /* Flags being copied must be valid copy-able flags. */ Assert((proc->statusFlags & (~PROC_COPYABLE_FLAGS)) == 0); MyProc->statusFlags = proc->statusFlags; ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags; result = true; } LWLockRelease(ProcArrayLock); return result; } /* * GetRunningTransactionData -- returns information about running transactions. * * Similar to GetSnapshotData but returns more information. We include * all PGPROCs with an assigned TransactionId, even VACUUM processes and * prepared transactions. * * We acquire XidGenLock and ProcArrayLock, but the caller is responsible for * releasing them. Acquiring XidGenLock ensures that no new XIDs enter the proc * array until the caller has WAL-logged this snapshot, and releases the * lock. Acquiring ProcArrayLock ensures that no transactions commit until the * lock is released. * * The returned data structure is statically allocated; caller should not * modify it, and must not assume it is valid past the next call. * * This is never executed during recovery so there is no need to look at * KnownAssignedXids. * * Dummy PGPROCs from prepared transactions are included, meaning that this * may return entries with duplicated TransactionId values, coming from * transactions that are finishing their prepare step. Nothing is done about * duplicate entries here, so as not to hold ProcArrayLock longer than * necessary. * * We don't worry about updating other counters, we want to keep this as * simple as possible and leave GetSnapshotData() as the primary code for * that bookkeeping. * * Note that if any transaction has overflowed its cached subtransactions * then there is no real need to include any subtransactions. */ RunningTransactions GetRunningTransactionData(void) { /* result workspace */ static RunningTransactionsData CurrentRunningXactsData; ProcArrayStruct *arrayP = procArray; TransactionId *other_xids = ProcGlobal->xids; RunningTransactions CurrentRunningXacts = &CurrentRunningXactsData; TransactionId latestCompletedXid; TransactionId oldestRunningXid; TransactionId *xids; int index; int count; int subcount; bool suboverflowed; Assert(!RecoveryInProgress()); /* * Allocating space for maxProcs xids is usually overkill; numProcs would * be sufficient. But it seems better to do the malloc while not holding * the lock, so we can't look at numProcs. Likewise, we allocate much * more subxip storage than is probably needed. * * Should only be allocated in bgwriter, since only ever executed during * checkpoints.
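* * (TOTAL_MAX_CACHED_SUBXIDS -- roughly (PGPROC_MAX_CACHED_SUBXIDS + 1) * * PROCARRAY_MAXPROCS, see its definition earlier in this file -- is sized so * that the array can hold every top-level xid plus every cached subxid in the * worst case.)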
*/ if (CurrentRunningXacts->xids == NULL) { /* * First call */ CurrentRunningXacts->xids = (TransactionId *) malloc(TOTAL_MAX_CACHED_SUBXIDS * sizeof(TransactionId)); if (CurrentRunningXacts->xids == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } xids = CurrentRunningXacts->xids; count = subcount = 0; suboverflowed = false; /* * Ensure that no xids enter or leave the procarray while we obtain * snapshot. */ LWLockAcquire(ProcArrayLock, LW_SHARED); LWLockAcquire(XidGenLock, LW_SHARED); latestCompletedXid = XidFromFullTransactionId(ShmemVariableCache->latestCompletedXid); oldestRunningXid = XidFromFullTransactionId(ShmemVariableCache->nextXid); /* * Spin over procArray collecting all xids */ for (index = 0; index < arrayP->numProcs; index++) { TransactionId xid; /* Fetch xid just once - see GetNewTransactionId */ xid = UINT32_ACCESS_ONCE(other_xids[index]); /* * We don't need to store transactions that don't have a TransactionId * yet because they will not show as running on a standby server. */ if (!TransactionIdIsValid(xid)) continue; /* * Be careful not to exclude any xids before calculating the values of * oldestRunningXid and suboverflowed, since these are used to clean * up transaction information held on standbys. */ if (TransactionIdPrecedes(xid, oldestRunningXid)) oldestRunningXid = xid; if (ProcGlobal->subxidStates[index].overflowed) suboverflowed = true; /* * If we wished to exclude xids this would be the right place for it. * Procs with the PROC_IN_VACUUM flag set don't usually assign xids, * but they do during truncation at the end when they get the lock and * truncate, so it is not much of a problem to include them if they * are seen and it is cleaner to include them. */ xids[count++] = xid; } /* * Spin over procArray collecting all subxids, but only if there hasn't * been a suboverflow. */ if (!suboverflowed) { XidCacheStatus *other_subxidstates = ProcGlobal->subxidStates; for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; int nsubxids; /* * Save subtransaction XIDs. Other backends can't add or remove * entries while we're holding XidGenLock. */ nsubxids = other_subxidstates[index].count; if (nsubxids > 0) { /* barrier not really required, as XidGenLock is held, but ... */ pg_read_barrier(); /* pairs with GetNewTransactionId */ memcpy(&xids[count], (void *) proc->subxids.xids, nsubxids * sizeof(TransactionId)); count += nsubxids; subcount += nsubxids; /* * Top-level XID of a transaction is always less than any of * its subxids, so we don't need to check if any of the * subxids are smaller than oldestRunningXid */ } } } /* * It's important *not* to include the limits set by slots here because * snapbuild.c uses oldestRunningXid to manage its xmin horizon. If those * were to be included here the initial value could never increase because * of a circular dependency where slots only increase their limits when * running xacts increases oldestRunningXid and running xacts only * increases if slots do. 
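* * (A sketch of the calling pattern -- in practice LogStandbySnapshot() in * standby.c: running = GetRunningTransactionData(); ... WAL-log the contents * of 'running' ...; LWLockRelease(ProcArrayLock); LWLockRelease(XidGenLock); * -- the exact release points relative to the WAL insertion are the caller's * business, per the header comment above.)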
*/ CurrentRunningXacts->xcnt = count - subcount; CurrentRunningXacts->subxcnt = subcount; CurrentRunningXacts->subxid_overflow = suboverflowed; CurrentRunningXacts->nextXid = XidFromFullTransactionId(ShmemVariableCache->nextXid); CurrentRunningXacts->oldestRunningXid = oldestRunningXid; CurrentRunningXacts->latestCompletedXid = latestCompletedXid; Assert(TransactionIdIsValid(CurrentRunningXacts->nextXid)); Assert(TransactionIdIsValid(CurrentRunningXacts->oldestRunningXid)); Assert(TransactionIdIsNormal(CurrentRunningXacts->latestCompletedXid)); /* We don't release the locks here, the caller is responsible for that */ return CurrentRunningXacts; } /* * GetOldestActiveTransactionId() * * Similar to GetSnapshotData but returns just oldestActiveXid. We include * all PGPROCs with an assigned TransactionId, even VACUUM processes. * We look at all databases, though there is no need to include WALSender * since this has no effect on hot standby conflicts. * * This is never executed during recovery so there is no need to look at * KnownAssignedXids. * * We don't worry about updating other counters, we want to keep this as * simple as possible and leave GetSnapshotData() as the primary code for * that bookkeeping. */ TransactionId GetOldestActiveTransactionId(void) { ProcArrayStruct *arrayP = procArray; TransactionId *other_xids = ProcGlobal->xids; TransactionId oldestRunningXid; int index; Assert(!RecoveryInProgress()); /* * Read nextXid, as the upper bound of what's still active. * * Reading a TransactionId is atomic, but we must grab the lock to make * sure that all XIDs < nextXid are already present in the proc array (or * have already completed), when we spin over it. */ LWLockAcquire(XidGenLock, LW_SHARED); oldestRunningXid = XidFromFullTransactionId(ShmemVariableCache->nextXid); LWLockRelease(XidGenLock); /* * Spin over procArray collecting all xids and subxids. */ LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { TransactionId xid; /* Fetch xid just once - see GetNewTransactionId */ xid = UINT32_ACCESS_ONCE(other_xids[index]); if (!TransactionIdIsNormal(xid)) continue; if (TransactionIdPrecedes(xid, oldestRunningXid)) oldestRunningXid = xid; /* * Top-level XID of a transaction is always less than any of its * subxids, so we don't need to check if any of the subxids are * smaller than oldestRunningXid */ } LWLockRelease(ProcArrayLock); return oldestRunningXid; } /* * GetOldestSafeDecodingTransactionId -- lowest xid not affected by vacuum * * Returns the oldest xid that we can guarantee not to have been affected by * vacuum, i.e. no rows >= that xid have been vacuumed away unless the * transaction aborted. Note that the value can (and most of the time will) be * much more conservative than what really has been affected by vacuum, but we * currently don't have better data available. * * This is useful to initialize the cutoff xid after which a new changeset * extraction replication slot can start decoding changes. * * Must be called with ProcArrayLock held either shared or exclusively, * although most callers will want to use exclusive mode since it is expected * that the caller will immediately use the xid to peg the xmin horizon. */ TransactionId GetOldestSafeDecodingTransactionId(bool catalogOnly) { ProcArrayStruct *arrayP = procArray; TransactionId oldestSafeXid; int index; bool recovery_in_progress = RecoveryInProgress(); Assert(LWLockHeldByMe(ProcArrayLock)); /* * Acquire XidGenLock, so no transactions can acquire an xid while we're * running. 
Otherwise a transaction could acquire an xid concurrently, and that new * xid could influence the result. * * We initialize the computation to nextXid since that's guaranteed to be * a safe, albeit pessimal, value. */ LWLockAcquire(XidGenLock, LW_SHARED); oldestSafeXid = XidFromFullTransactionId(ShmemVariableCache->nextXid); /* * If there's already a slot pegging the xmin horizon, we can start with * that value; it's guaranteed to be safe since it's computed by this * routine initially and has been enforced since. We can always use the * slot's general xmin horizon, but the catalog horizon is only usable * when only catalog data is going to be looked at. */ if (TransactionIdIsValid(procArray->replication_slot_xmin) && TransactionIdPrecedes(procArray->replication_slot_xmin, oldestSafeXid)) oldestSafeXid = procArray->replication_slot_xmin; if (catalogOnly && TransactionIdIsValid(procArray->replication_slot_catalog_xmin) && TransactionIdPrecedes(procArray->replication_slot_catalog_xmin, oldestSafeXid)) oldestSafeXid = procArray->replication_slot_catalog_xmin; /* * If we're not in recovery, we walk over the procarray and collect the * lowest xid. Since we're called with ProcArrayLock held and have * acquired XidGenLock, no entries can vanish concurrently, since * ProcGlobal->xids[i] is only set with XidGenLock held and only cleared * with ProcArrayLock held. * * In recovery we can't lower the safe value beyond what we've computed * above, so we'll have to wait a bit longer there. We unfortunately can * *not* use KnownAssignedXidsGetOldestXmin() since the KnownAssignedXids * machinery can miss values and return an older value than is safe. */ if (!recovery_in_progress) { TransactionId *other_xids = ProcGlobal->xids; /* * Spin over procArray collecting min(ProcGlobal->xids[i]) */ for (index = 0; index < arrayP->numProcs; index++) { TransactionId xid; /* Fetch xid just once - see GetNewTransactionId */ xid = UINT32_ACCESS_ONCE(other_xids[index]); if (!TransactionIdIsNormal(xid)) continue; if (TransactionIdPrecedes(xid, oldestSafeXid)) oldestSafeXid = xid; } } LWLockRelease(XidGenLock); return oldestSafeXid; } /* * GetVirtualXIDsDelayingChkpt -- Get the VXIDs of transactions that are * delaying checkpoint because they have critical actions in progress. * * Constructs an array of VXIDs of transactions that are currently in commit * critical sections, as shown by having delayChkpt set in their PGPROC. * * Returns a palloc'd array that should be freed by the caller. * *nvxids is the number of valid entries. * * Note that because backends set or clear delayChkpt without holding any lock, * the result is somewhat indeterminate, but we don't really care. Even in * a multiprocessor with delayed writes to shared memory, it should be certain * that setting of delayChkpt will propagate to shared memory when the backend * takes a lock, so we cannot fail to see a virtual xact as delayChkpt if * it's already inserted its commit record. Whether it takes a little while * for clearing of delayChkpt to propagate is unimportant for correctness.
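* * A checkpointer-style caller (sketch) pairs this with * HaveVirtualXIDsDelayingChkpt() below: * vxids = GetVirtualXIDsDelayingChkpt(&nvxids); * if (nvxids > 0) { do pg_usleep(10000L); while (HaveVirtualXIDsDelayingChkpt(vxids, nvxids)); } * pfree(vxids);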
*/ VirtualTransactionId * GetVirtualXIDsDelayingChkpt(int *nvxids) { VirtualTransactionId *vxids; ProcArrayStruct *arrayP = procArray; int count = 0; int index; /* allocate what's certainly enough result space */ vxids = (VirtualTransactionId *) palloc(sizeof(VirtualTransactionId) * arrayP->maxProcs); LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; if (proc->delayChkpt) { VirtualTransactionId vxid; GET_VXID_FROM_PGPROC(vxid, *proc); if (VirtualTransactionIdIsValid(vxid)) vxids[count++] = vxid; } } LWLockRelease(ProcArrayLock); *nvxids = count; return vxids; } /* * HaveVirtualXIDsDelayingChkpt -- Are any of the specified VXIDs delaying? * * This is used with the results of GetVirtualXIDsDelayingChkpt to see if any * of the specified VXIDs are still in critical sections of code. * * Note: this is O(N^2) in the number of vxacts that are/were delaying, but * those numbers should be small enough for it not to be a problem. */ bool HaveVirtualXIDsDelayingChkpt(VirtualTransactionId *vxids, int nvxids) { bool result = false; ProcArrayStruct *arrayP = procArray; int index; LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; VirtualTransactionId vxid; GET_VXID_FROM_PGPROC(vxid, *proc); if (proc->delayChkpt && VirtualTransactionIdIsValid(vxid)) { int i; for (i = 0; i < nvxids; i++) { if (VirtualTransactionIdEquals(vxid, vxids[i])) { result = true; break; } } if (result) break; } } LWLockRelease(ProcArrayLock); return result; } /* * BackendPidGetProc -- get a backend's PGPROC given its PID * * Returns NULL if not found. Note that it is up to the caller to be * sure that the question remains meaningful for long enough for the * answer to be used ... */ PGPROC * BackendPidGetProc(int pid) { PGPROC *result; if (pid == 0) /* never match dummy PGPROCs */ return NULL; LWLockAcquire(ProcArrayLock, LW_SHARED); result = BackendPidGetProcWithLock(pid); LWLockRelease(ProcArrayLock); return result; } /* * BackendPidGetProcWithLock -- get a backend's PGPROC given its PID * * Same as above, except caller must be holding ProcArrayLock. The found * entry, if any, can be assumed to be valid as long as the lock remains held. */ PGPROC * BackendPidGetProcWithLock(int pid) { PGPROC *result = NULL; ProcArrayStruct *arrayP = procArray; int index; if (pid == 0) /* never match dummy PGPROCs */ return NULL; for (index = 0; index < arrayP->numProcs; index++) { PGPROC *proc = &allProcs[arrayP->pgprocnos[index]]; if (proc->pid == pid) { result = proc; break; } } return result; } /* * BackendXidGetPid -- get a backend's pid given its XID * * Returns 0 if not found or it's a prepared transaction. Note that * it is up to the caller to be sure that the question remains * meaningful for long enough for the answer to be used ... * * Only main transaction Ids are considered. This function is mainly * useful for determining what backend owns a lock. * * Beware that not every xact has an XID assigned. However, as long as you * only call this using an XID found on disk, you're safe. 
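* * Example (sketch; lock_holder_xid being a hypothetical xid obtained * elsewhere, e.g. from a lock's tag): * pid = BackendXidGetPid(lock_holder_xid); * if (pid != 0) * elog(LOG, "xid %u is held by backend with pid %d", lock_holder_xid, pid);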
*/ int BackendXidGetPid(TransactionId xid) { int result = 0; ProcArrayStruct *arrayP = procArray; TransactionId *other_xids = ProcGlobal->xids; int index; if (xid == InvalidTransactionId) /* never match invalid xid */ return 0; LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; if (other_xids[index] == xid) { result = proc->pid; break; } } LWLockRelease(ProcArrayLock); return result; } /* * IsBackendPid -- is a given pid a running backend * * This is not called by the backend, but is called by external modules. */ bool IsBackendPid(int pid) { return (BackendPidGetProc(pid) != NULL); } /* * GetCurrentVirtualXIDs -- returns an array of currently active VXIDs. * * The array is palloc'd. The number of valid entries is returned into *nvxids. * * The arguments allow filtering the set of VXIDs returned. Our own process * is always skipped. In addition: * If limitXmin is not InvalidTransactionId, skip processes with * xmin > limitXmin. * If excludeXmin0 is true, skip processes with xmin = 0. * If allDbs is false, skip processes attached to other databases. * If excludeVacuum isn't zero, skip processes for which * (statusFlags & excludeVacuum) is not zero. * * Note: the purpose of the limitXmin and excludeXmin0 parameters is to * allow skipping backends whose oldest live snapshot is no older than * some snapshot we have. Since we examine the procarray with only shared * lock, there are race conditions: a backend could set its xmin just after * we look. Indeed, on multiprocessors with weak memory ordering, the * other backend could have set its xmin *before* we look. We know however * that such a backend must have held shared ProcArrayLock overlapping our * own hold of ProcArrayLock, else we would see its xmin update. Therefore, * any snapshot the other backend is taking concurrently with our scan cannot * consider any transactions as still running that we think are committed * (since backends must hold ProcArrayLock exclusive to commit). */ VirtualTransactionId * GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0, bool allDbs, int excludeVacuum, int *nvxids) { VirtualTransactionId *vxids; ProcArrayStruct *arrayP = procArray; int count = 0; int index; /* allocate what's certainly enough result space */ vxids = (VirtualTransactionId *) palloc(sizeof(VirtualTransactionId) * arrayP->maxProcs); LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; uint8 statusFlags = ProcGlobal->statusFlags[index]; if (proc == MyProc) continue; if (excludeVacuum & statusFlags) continue; if (allDbs || proc->databaseId == MyDatabaseId) { /* Fetch xmin just once - might change on us */ TransactionId pxmin = UINT32_ACCESS_ONCE(proc->xmin); if (excludeXmin0 && !TransactionIdIsValid(pxmin)) continue; /* * InvalidTransactionId precedes all other XIDs, so a proc that * hasn't set xmin yet will not be rejected by this test. */ if (!TransactionIdIsValid(limitXmin) || TransactionIdPrecedesOrEquals(pxmin, limitXmin)) { VirtualTransactionId vxid; GET_VXID_FROM_PGPROC(vxid, *proc); if (VirtualTransactionIdIsValid(vxid)) vxids[count++] = vxid; } } } LWLockRelease(ProcArrayLock); *nvxids = count; return vxids; } /* * GetConflictingVirtualXIDs -- returns an array of currently active VXIDs. * * Usage is limited to conflict resolution during recovery on standby servers. 
* limitXmin is supplied as either latestRemovedXid, or InvalidTransactionId * in cases where we cannot accurately determine a value for latestRemovedXid. * * If limitXmin is InvalidTransactionId then we want to kill everybody, * so we're not worried if they have a snapshot or not, nor does it really * matter what type of lock we hold. * * All callers that are checking xmins always now supply a valid and useful * value for limitXmin. The limitXmin is always lower than the lowest * numbered KnownAssignedXid that is not already a FATAL error. This is * because we only care about cleanup records that are cleaning up tuple * versions from committed transactions. In that case they will only occur * at the point where the record is less than the lowest running xid. That * allows us to say that if any backend takes a snapshot concurrently with * us then the conflict assessment made here would never include the snapshot * that is being derived. So we take LW_SHARED on the ProcArray and allow * concurrent snapshots when limitXmin is valid. We might think about adding * Assert(limitXmin < lowest(KnownAssignedXids)) * but that would not be true in the case of FATAL errors lagging in array, * but we already know those are bogus anyway, so we skip that test. * * If dbOid is valid we skip backends attached to other databases. * * Be careful to *not* pfree the result from this function. We reuse * this array sufficiently often that we use malloc for the result. */ VirtualTransactionId * GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid) { static VirtualTransactionId *vxids; ProcArrayStruct *arrayP = procArray; int count = 0; int index; /* * If first time through, get workspace to remember main XIDs in. We * malloc it permanently to avoid repeated palloc/pfree overhead. Allow * result space, remembering room for a terminator. */ if (vxids == NULL) { vxids = (VirtualTransactionId *) malloc(sizeof(VirtualTransactionId) * (arrayP->maxProcs + 1)); if (vxids == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; /* Exclude prepared transactions */ if (proc->pid == 0) continue; if (!OidIsValid(dbOid) || proc->databaseId == dbOid) { /* Fetch xmin just once - can't change on us, but good coding */ TransactionId pxmin = UINT32_ACCESS_ONCE(proc->xmin); /* * We ignore an invalid pxmin because this means that backend has * no snapshot currently. We hold a Share lock to avoid contention * with users taking snapshots. That is not a problem because the * current xmin is always at least one higher than the latest * removed xid, so any new snapshot would never conflict with the * test here. */ if (!TransactionIdIsValid(limitXmin) || (TransactionIdIsValid(pxmin) && !TransactionIdFollows(pxmin, limitXmin))) { VirtualTransactionId vxid; GET_VXID_FROM_PGPROC(vxid, *proc); if (VirtualTransactionIdIsValid(vxid)) vxids[count++] = vxid; } } } LWLockRelease(ProcArrayLock); /* add the terminator */ vxids[count].backendId = InvalidBackendId; vxids[count].localTransactionId = InvalidLocalTransactionId; return vxids; } /* * CancelVirtualTransaction - used in recovery conflict processing * * Returns pid of the process signaled, or 0 if not found. 
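* * Recovery conflict resolution (a sketch of the pattern in * ResolveRecoveryConflictWithVirtualXIDs() in standby.c) walks the * terminator-ended array from GetConflictingVirtualXIDs() and signals each entry: * while (VirtualTransactionIdIsValid(*vxids)) * CancelVirtualTransaction(*vxids++, PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);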
*/ pid_t CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode) { return SignalVirtualTransaction(vxid, sigmode, true); } pid_t SignalVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode, bool conflictPending) { ProcArrayStruct *arrayP = procArray; int index; pid_t pid = 0; LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; VirtualTransactionId procvxid; GET_VXID_FROM_PGPROC(procvxid, *proc); if (procvxid.backendId == vxid.backendId && procvxid.localTransactionId == vxid.localTransactionId) { proc->recoveryConflictPending = conflictPending; pid = proc->pid; if (pid != 0) { /* * Kill the pid if it's still here. If not, that's what we * wanted so ignore any errors. */ (void) SendProcSignal(pid, sigmode, vxid.backendId); } break; } } LWLockRelease(ProcArrayLock); return pid; } /* * MinimumActiveBackends --- count backends (other than myself) that are * in active transactions. Return true if the count exceeds the * minimum threshold passed. This is used as a heuristic to decide if * a pre-XLOG-flush delay is worthwhile during commit. * * Do not count backends that are blocked waiting for locks, since they are * not going to get to run until someone else commits. */ bool MinimumActiveBackends(int min) { ProcArrayStruct *arrayP = procArray; int count = 0; int index; /* Quick short-circuit if no minimum is specified */ if (min == 0) return true; /* * Note: for speed, we don't acquire ProcArrayLock. This is a little bit * bogus, but since we are only testing fields for zero or nonzero, it * should be OK. The result is only used for heuristic purposes anyway... */ for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; /* * Since we're not holding a lock, need to be prepared to deal with * garbage, as someone could have incremented numProcs but not yet * filled the structure. * * If someone just decremented numProcs, 'proc' could also point to a * PGPROC entry that's no longer in the array. It still points to a * PGPROC struct, though, because freed PGPROC entries just go to the * free list and are recycled. Its contents are nonsense in that case, * but that's acceptable for this function. 
*/ if (pgprocno == -1) continue; /* do not count deleted entries */ if (proc == MyProc) continue; /* do not count myself */ if (proc->xid == InvalidTransactionId) continue; /* do not count if no XID assigned */ if (proc->pid == 0) continue; /* do not count prepared xacts */ if (proc->waitLock != NULL) continue; /* do not count if blocked on a lock */ count++; if (count >= min) break; } return count >= min; } /* * CountDBBackends --- count backends that are using specified database */ int CountDBBackends(Oid databaseid) { ProcArrayStruct *arrayP = procArray; int count = 0; int index; LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; if (proc->pid == 0) continue; /* do not count prepared xacts */ if (!OidIsValid(databaseid) || proc->databaseId == databaseid) count++; } LWLockRelease(ProcArrayLock); return count; } /* * CountDBConnections --- counts database backends ignoring any background * worker processes */ int CountDBConnections(Oid databaseid) { ProcArrayStruct *arrayP = procArray; int count = 0; int index; LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; if (proc->pid == 0) continue; /* do not count prepared xacts */ if (proc->isBackgroundWorker) continue; /* do not count background workers */ if (!OidIsValid(databaseid) || proc->databaseId == databaseid) count++; } LWLockRelease(ProcArrayLock); return count; } /* * CancelDBBackends --- cancel backends that are using specified database */ void CancelDBBackends(Oid databaseid, ProcSignalReason sigmode, bool conflictPending) { ProcArrayStruct *arrayP = procArray; int index; /* tell all backends to die */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; if (databaseid == InvalidOid || proc->databaseId == databaseid) { VirtualTransactionId procvxid; pid_t pid; GET_VXID_FROM_PGPROC(procvxid, *proc); proc->recoveryConflictPending = conflictPending; pid = proc->pid; if (pid != 0) { /* * Kill the pid if it's still here. If not, that's what we * wanted so ignore any errors. */ (void) SendProcSignal(pid, sigmode, procvxid.backendId); } } } LWLockRelease(ProcArrayLock); } /* * CountUserBackends --- count backends that are used by specified user */ int CountUserBackends(Oid roleid) { ProcArrayStruct *arrayP = procArray; int count = 0; int index; LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; if (proc->pid == 0) continue; /* do not count prepared xacts */ if (proc->isBackgroundWorker) continue; /* do not count background workers */ if (proc->roleId == roleid) count++; } LWLockRelease(ProcArrayLock); return count; } /* * CountOtherDBBackends -- check for other backends running in the given DB * * If there are other backends in the DB, we will wait a maximum of 5 seconds * for them to exit. Autovacuum backends are encouraged to exit early by * sending them SIGTERM, but normal user backends are just waited for. * * The current backend is always ignored; it is caller's responsibility to * check whether the current backend uses the given DB, if it's important. * * Returns true if there are (still) other backends in the DB, false if not. 
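* * (For example, dropdb() in dbcommands.c does roughly: * if (CountOtherDBBackends(db_id, &notherbackends, &npreparedxacts)) * ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), * errmsg("database \"%s\" is being accessed by other users", dbname))); * -- a sketch, eliding the errdetail.)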
* Also, *nbackends and *nprepared are set to the number of other backends * and prepared transactions in the DB, respectively. * * This function is used to interlock DROP DATABASE and related commands * against there being any active backends in the target DB --- dropping the * DB while active backends remain would be a Bad Thing. Note that we cannot * detect here the possibility of a newly-started backend that is trying to * connect to the doomed database, so additional interlocking is needed during * backend startup. The caller should normally hold an exclusive lock on the * target DB before calling this, which is one reason we mustn't wait * indefinitely. */ bool CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared) { ProcArrayStruct *arrayP = procArray; #define MAXAUTOVACPIDS 10 /* max autovacs to SIGTERM per iteration */ int autovac_pids[MAXAUTOVACPIDS]; int tries; /* 50 tries with 100ms sleep between tries makes 5 sec total wait */ for (tries = 0; tries < 50; tries++) { int nautovacs = 0; bool found = false; int index; CHECK_FOR_INTERRUPTS(); *nbackends = *nprepared = 0; LWLockAcquire(ProcArrayLock, LW_SHARED); for (index = 0; index < arrayP->numProcs; index++) { int pgprocno = arrayP->pgprocnos[index]; PGPROC *proc = &allProcs[pgprocno]; uint8 statusFlags = ProcGlobal->statusFlags[index]; if (proc->databaseId != databaseId) continue; if (proc == MyProc) continue; found = true; if (proc->pid == 0) (*nprepared)++; else { (*nbackends)++; if ((statusFlags & PROC_IS_AUTOVACUUM) && nautovacs < MAXAUTOVACPIDS) autovac_pids[nautovacs++] = proc->pid; } } LWLockRelease(ProcArrayLock); if (!found) return false; /* no conflicting backends, so done */ /* * Send SIGTERM to any conflicting autovacuums before sleeping. We * postpone this step until after the loop because we don't want to * hold ProcArrayLock while issuing kill(). We have no idea what might * block kill() inside the kernel... */ for (index = 0; index < nautovacs; index++) (void) kill(autovac_pids[index], SIGTERM); /* ignore any error */ /* sleep, then try again */ pg_usleep(100 * 1000L); /* 100ms */ } return true; /* timed out, still conflicts */ } /* * Terminate existing connections to the specified database. This routine * is used by the DROP DATABASE command when the user has asked to forcefully * drop the database. * * The current backend is always ignored; it is caller's responsibility to * check whether the current backend uses the given DB, if it's important. * * It doesn't allow terminating the connections if there is even one backend * with a prepared transaction in the target database. */ void TerminateOtherDBBackends(Oid databaseId) { ProcArrayStruct *arrayP = procArray; List *pids = NIL; int nprepared = 0; int i; LWLockAcquire(ProcArrayLock, LW_SHARED); for (i = 0; i < procArray->numProcs; i++) { int pgprocno = arrayP->pgprocnos[i]; PGPROC *proc = &allProcs[pgprocno]; if (proc->databaseId != databaseId) continue; if (proc == MyProc) continue; if (proc->pid != 0) pids = lappend_int(pids, proc->pid); else nprepared++; } LWLockRelease(ProcArrayLock); if (nprepared > 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), errmsg("database \"%s\" is being used by prepared transactions", get_database_name(databaseId)), errdetail_plural("There is %d prepared transaction using the database.", "There are %d prepared transactions using the database.", nprepared, nprepared))); if (pids) { ListCell *lc; /* * Check whether we have the necessary rights to terminate other * sessions.
We don't terminate any session until we ensure that we * have rights on all the sessions to be terminated. These checks are * the same as we do in pg_terminate_backend. * * In this case we don't raise some warnings - like "PID %d is not a * PostgreSQL server process", because for us already finished session * is not a problem. */ foreach(lc, pids) { int pid = lfirst_int(lc); PGPROC *proc = BackendPidGetProc(pid); if (proc != NULL) { /* Only allow superusers to signal superuser-owned backends. */ if (superuser_arg(proc->roleId) && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be a superuser to terminate superuser process"))); /* Users can signal backends they have role membership in. */ if (!has_privs_of_role(GetUserId(), proc->roleId) && !has_privs_of_role(GetUserId(), ROLE_PG_SIGNAL_BACKEND)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be a member of the role whose process is being terminated or member of pg_signal_backend"))); } } /* * There's a race condition here: once we release the ProcArrayLock, * it's possible for the session to exit before we issue kill. That * race condition possibility seems too unlikely to worry about. See * pg_signal_backend. */ foreach(lc, pids) { int pid = lfirst_int(lc); PGPROC *proc = BackendPidGetProc(pid); if (proc != NULL) { /* * If we have setsid(), signal the backend's whole process * group */ #ifdef HAVE_SETSID (void) kill(-pid, SIGTERM); #else (void) kill(pid, SIGTERM); #endif } } } } /* * ProcArraySetReplicationSlotXmin * * Install limits to future computations of the xmin horizon to prevent vacuum * and HOT pruning from removing affected rows still needed by clients with * replication slots. */ void ProcArraySetReplicationSlotXmin(TransactionId xmin, TransactionId catalog_xmin, bool already_locked) { Assert(!already_locked || LWLockHeldByMe(ProcArrayLock)); if (!already_locked) LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); procArray->replication_slot_xmin = xmin; procArray->replication_slot_catalog_xmin = catalog_xmin; if (!already_locked) LWLockRelease(ProcArrayLock); } /* * ProcArrayGetReplicationSlotXmin * * Return the current slot xmin limits. That's useful to be able to remove * data that's older than those limits. */ void ProcArrayGetReplicationSlotXmin(TransactionId *xmin, TransactionId *catalog_xmin) { LWLockAcquire(ProcArrayLock, LW_SHARED); if (xmin != NULL) *xmin = procArray->replication_slot_xmin; if (catalog_xmin != NULL) *catalog_xmin = procArray->replication_slot_catalog_xmin; LWLockRelease(ProcArrayLock); } /* * XidCacheRemoveRunningXids * * Remove a bunch of TransactionIds from the list of known-running * subtransactions for my backend. Both the specified xid and those in * the xids[] array (of length nxids) are removed from the subxids cache. * latestXid must be the latest XID among the group. */ void XidCacheRemoveRunningXids(TransactionId xid, int nxids, const TransactionId *xids, TransactionId latestXid) { int i, j; XidCacheStatus *mysubxidstat; Assert(TransactionIdIsValid(xid)); /* * We must hold ProcArrayLock exclusively in order to remove transactions * from the PGPROC array. (See src/backend/access/transam/README.) It's * possible this could be relaxed since we know this routine is only used * to abort subtransactions, but pending closer analysis we'd best be * conservative. * * Note that we do not have to be careful about memory ordering of our own * reads wrt. 
GetNewTransactionId() here - only this process can modify * relevant fields of MyProc/ProcGlobal->xids[]. But we do have to be * careful about our own writes being well ordered. */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); mysubxidstat = &ProcGlobal->subxidStates[MyProc->pgxactoff]; /* * Under normal circumstances xid and xids[] will be in increasing order, * as will be the entries in subxids. Scan backwards to avoid O(N^2) * behavior when removing a lot of xids. */ for (i = nxids - 1; i >= 0; i--) { TransactionId anxid = xids[i]; for (j = MyProc->subxidStatus.count - 1; j >= 0; j--) { if (TransactionIdEquals(MyProc->subxids.xids[j], anxid)) { MyProc->subxids.xids[j] = MyProc->subxids.xids[MyProc->subxidStatus.count - 1]; pg_write_barrier(); mysubxidstat->count--; MyProc->subxidStatus.count--; break; } } /* * Ordinarily we should have found it, unless the cache has * overflowed. However it's also possible for this routine to be * invoked multiple times for the same subtransaction, in case of an * error during AbortSubTransaction. So instead of Assert, emit a * debug warning. */ if (j < 0 && !MyProc->subxidStatus.overflowed) elog(WARNING, "did not find subXID %u in MyProc", anxid); } for (j = MyProc->subxidStatus.count - 1; j >= 0; j--) { if (TransactionIdEquals(MyProc->subxids.xids[j], xid)) { MyProc->subxids.xids[j] = MyProc->subxids.xids[MyProc->subxidStatus.count - 1]; pg_write_barrier(); mysubxidstat->count--; MyProc->subxidStatus.count--; break; } } /* Ordinarily we should have found it, unless the cache has overflowed */ if (j < 0 && !MyProc->subxidStatus.overflowed) elog(WARNING, "did not find subXID %u in MyProc", xid); /* Also advance global latestCompletedXid while holding the lock */ MaintainLatestCompletedXid(latestXid); /* ... and xactCompletionCount */ ShmemVariableCache->xactCompletionCount++; LWLockRelease(ProcArrayLock); } #ifdef XIDCACHE_DEBUG /* * Print stats about effectiveness of XID cache */ static void DisplayXidCache(void) { fprintf(stderr, "XidCache: xmin: %ld, known: %ld, myxact: %ld, latest: %ld, mainxid: %ld, childxid: %ld, knownassigned: %ld, nooflo: %ld, slow: %ld\n", xc_by_recent_xmin, xc_by_known_xact, xc_by_my_xact, xc_by_latest_xid, xc_by_main_xid, xc_by_child_xid, xc_by_known_assigned, xc_no_overflow, xc_slow_answer); } #endif /* XIDCACHE_DEBUG */ /* * If rel != NULL, return test state appropriate for relation, otherwise * return state usable for all relations. The latter may consider XIDs as * not-yet-visible-to-everyone that a state for a specific relation would * already consider visible-to-everyone. * * This needs to be called while a snapshot is active or registered, otherwise * there are wraparound and other dangers. * * See comment for GlobalVisState for details. */ GlobalVisState * GlobalVisTestFor(Relation rel) { GlobalVisState *state = NULL; /* XXX: we should assert that a snapshot is pushed or registered */ Assert(RecentXmin); switch (GlobalVisHorizonKindForRel(rel)) { case VISHORIZON_SHARED: state = &GlobalVisSharedRels; break; case VISHORIZON_CATALOG: state = &GlobalVisCatalogRels; break; case VISHORIZON_DATA: state = &GlobalVisDataRels; break; case VISHORIZON_TEMP: state = &GlobalVisTempRels; break; } Assert(FullTransactionIdIsValid(state->definitely_needed) && FullTransactionIdIsValid(state->maybe_needed)); return state; } /* * Return true if it's worth updating the accurate maybe_needed boundary. 
* * As it is somewhat expensive to determine xmin horizons, we don't want to * repeatedly do so when there is a low likelihood of it being beneficial. * * The current heuristic is that we update only if RecentXmin has changed * since the last update. If the oldest currently running transaction has not * finished, it is unlikely that recomputing the horizon would be useful. */ static bool GlobalVisTestShouldUpdate(GlobalVisState *state) { /* hasn't been updated yet */ if (!TransactionIdIsValid(ComputeXidHorizonsResultLastXmin)) return true; /* * If the maybe_needed/definitely_needed boundaries are the same, it's * unlikely to be beneficial to refresh boundaries. */ if (FullTransactionIdFollowsOrEquals(state->maybe_needed, state->definitely_needed)) return false; /* does the last snapshot built have a different xmin? */ return RecentXmin != ComputeXidHorizonsResultLastXmin; } static void GlobalVisUpdateApply(ComputeXidHorizonsResult *horizons) { GlobalVisSharedRels.maybe_needed = FullXidRelativeTo(horizons->latest_completed, horizons->shared_oldest_nonremovable); GlobalVisCatalogRels.maybe_needed = FullXidRelativeTo(horizons->latest_completed, horizons->catalog_oldest_nonremovable); GlobalVisDataRels.maybe_needed = FullXidRelativeTo(horizons->latest_completed, horizons->data_oldest_nonremovable); GlobalVisTempRels.maybe_needed = FullXidRelativeTo(horizons->latest_completed, horizons->temp_oldest_nonremovable); /* * In longer running transactions it's possible that transactions we * previously needed to treat as running aren't around anymore. So update * definitely_needed to not be earlier than maybe_needed. */ GlobalVisSharedRels.definitely_needed = FullTransactionIdNewer(GlobalVisSharedRels.maybe_needed, GlobalVisSharedRels.definitely_needed); GlobalVisCatalogRels.definitely_needed = FullTransactionIdNewer(GlobalVisCatalogRels.maybe_needed, GlobalVisCatalogRels.definitely_needed); GlobalVisDataRels.definitely_needed = FullTransactionIdNewer(GlobalVisDataRels.maybe_needed, GlobalVisDataRels.definitely_needed); GlobalVisTempRels.definitely_needed = GlobalVisTempRels.maybe_needed; ComputeXidHorizonsResultLastXmin = RecentXmin; } /* * Update boundaries in GlobalVis{Shared,Catalog, Data}Rels * using ComputeXidHorizons(). */ static void GlobalVisUpdate(void) { ComputeXidHorizonsResult horizons; /* updates the horizons as a side-effect */ ComputeXidHorizons(&horizons); } /* * Return true if no snapshot still considers fxid to be running. * * The state passed needs to have been initialized for the relation fxid is * from (NULL is also OK), otherwise the result may not be correct. * * See comment for GlobalVisState for details. */ bool GlobalVisTestIsRemovableFullXid(GlobalVisState *state, FullTransactionId fxid) { /* * If fxid is older than maybe_needed bound, it definitely is visible to * everyone. */ if (FullTransactionIdPrecedes(fxid, state->maybe_needed)) return true; /* * If fxid is >= definitely_needed bound, it is very likely to still be * considered running. */ if (FullTransactionIdFollowsOrEquals(fxid, state->definitely_needed)) return false; /* * fxid is between maybe_needed and definitely_needed, i.e. there might or * might not exist a snapshot considering fxid running. If it makes sense, * update boundaries and recheck. 
*/ if (GlobalVisTestShouldUpdate(state)) { GlobalVisUpdate(); Assert(FullTransactionIdPrecedes(fxid, state->definitely_needed)); return FullTransactionIdPrecedes(fxid, state->maybe_needed); } else return false; } /* * Wrapper around GlobalVisTestIsRemovableFullXid() for 32bit xids. * * It is crucial that this only gets called for xids from a source that * protects against xid wraparounds (e.g. from a table and thus protected by * relfrozenxid). */ bool GlobalVisTestIsRemovableXid(GlobalVisState *state, TransactionId xid) { FullTransactionId fxid; /* * Convert 32 bit argument to FullTransactionId. We can do so safely * because we know the xid has to, at the very least, be between * [oldestXid, nextFullXid), i.e. within 2 billion of xid. To avoid taking * a lock to determine either, we can just compare with * state->definitely_needed, which was based on those value at the time * the current snapshot was built. */ fxid = FullXidRelativeTo(state->definitely_needed, xid); return GlobalVisTestIsRemovableFullXid(state, fxid); } /* * Return FullTransactionId below which all transactions are not considered * running anymore. * * Note: This is less efficient than testing with * GlobalVisTestIsRemovableFullXid as it likely requires building an accurate * cutoff, even in the case all the XIDs compared with the cutoff are outside * [maybe_needed, definitely_needed). */ FullTransactionId GlobalVisTestNonRemovableFullHorizon(GlobalVisState *state) { /* acquire accurate horizon if not already done */ if (GlobalVisTestShouldUpdate(state)) GlobalVisUpdate(); return state->maybe_needed; } /* Convenience wrapper around GlobalVisTestNonRemovableFullHorizon */ TransactionId GlobalVisTestNonRemovableHorizon(GlobalVisState *state) { FullTransactionId cutoff; cutoff = GlobalVisTestNonRemovableFullHorizon(state); return XidFromFullTransactionId(cutoff); } /* * Convenience wrapper around GlobalVisTestFor() and * GlobalVisTestIsRemovableFullXid(), see their comments. */ bool GlobalVisCheckRemovableFullXid(Relation rel, FullTransactionId fxid) { GlobalVisState *state; state = GlobalVisTestFor(rel); return GlobalVisTestIsRemovableFullXid(state, fxid); } /* * Convenience wrapper around GlobalVisTestFor() and * GlobalVisTestIsRemovableXid(), see their comments. */ bool GlobalVisCheckRemovableXid(Relation rel, TransactionId xid) { GlobalVisState *state; state = GlobalVisTestFor(rel); return GlobalVisTestIsRemovableXid(state, xid); } /* * Convert a 32 bit transaction id into 64 bit transaction id, by assuming it * is within MaxTransactionId / 2 of XidFromFullTransactionId(rel). * * Be very careful about when to use this function. It can only safely be used * when there is a guarantee that xid is within MaxTransactionId / 2 xids of * rel. That e.g. can be guaranteed if the caller assures a snapshot is * held by the backend and xid is from a table (where vacuum/freezing ensures * the xid has to be within that range), or if xid is from the procarray and * prevents xid wraparound that way. 
*/ static inline FullTransactionId FullXidRelativeTo(FullTransactionId rel, TransactionId xid) { TransactionId rel_xid = XidFromFullTransactionId(rel); Assert(TransactionIdIsValid(xid)); Assert(TransactionIdIsValid(rel_xid)); /* not guaranteed to find issues, but likely to catch mistakes */ AssertTransactionIdInAllowableRange(xid); return FullTransactionIdFromU64(U64FromFullTransactionId(rel) + (int32) (xid - rel_xid)); } /* ---------------------------------------------- * KnownAssignedTransactionIds sub-module * ---------------------------------------------- */ /* * In Hot Standby mode, we maintain a list of transactions that are (or were) * running on the primary at the current point in WAL. These XIDs must be * treated as running by standby transactions, even though they are not in * the standby server's PGPROC array. * * We record all XIDs that we know have been assigned. That includes all the * XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have * been assigned. We can deduce the existence of unobserved XIDs because we * know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids * list expands as new XIDs are observed or inferred, and contracts when * transaction completion records arrive. * * During hot standby we do not fret too much about the distinction between * top-level XIDs and subtransaction XIDs. We store both together in the * KnownAssignedXids list. In backends, this is copied into snapshots in * GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot() * doesn't care about the distinction either. Subtransaction XIDs are * effectively treated as top-level XIDs and in the typical case pg_subtrans * links are *not* maintained (which does not affect visibility). * * We have room in KnownAssignedXids and in snapshots to hold maxProcs * * (1 + PGPROC_MAX_CACHED_SUBXIDS) XIDs, so every primary transaction must * report its subtransaction XIDs in a WAL XLOG_XACT_ASSIGNMENT record at * least every PGPROC_MAX_CACHED_SUBXIDS. When we receive one of these * records, we mark the subXIDs as children of the top XID in pg_subtrans, * and then remove them from KnownAssignedXids. This prevents overflow of * KnownAssignedXids and snapshots, at the cost that status checks for these * subXIDs will take a slower path through TransactionIdIsInProgress(). * This means that KnownAssignedXids is not necessarily complete for subXIDs, * though it should be complete for top-level XIDs; this is the same situation * that holds with respect to the PGPROC entries in normal running. * * When we throw away subXIDs from KnownAssignedXids, we need to keep track of * that, similarly to tracking overflow of a PGPROC's subxids array. We do * that by remembering the lastOverflowedXid, ie the last thrown-away subXID. * As long as that is within the range of interesting XIDs, we have to assume * that subXIDs are missing from snapshots. (Note that subXID overflow occurs * on primary when 65th subXID arrives, whereas on standby it occurs when 64th * subXID arrives - that is not an error.) * * Should a backend on primary somehow disappear before it can write an abort * record, then we just leave those XIDs in KnownAssignedXids. They actually * aborted but we think they were running; the distinction is irrelevant * because either way any changes done by the transaction are not visible to * backends in the standby. We prune KnownAssignedXids when * XLOG_RUNNING_XACTS arrives, to forestall possible overflow of the * array due to such dead XIDs. 
*/ /* * RecordKnownAssignedTransactionIds * Record the given XID in KnownAssignedXids, as well as any preceding * unobserved XIDs. * * RecordKnownAssignedTransactionIds() should be run for *every* WAL record * associated with a transaction. Must be called for each record after we * have executed StartupCLOG() et al, since we must ExtendCLOG() etc.. * * Called during recovery in analogy with and in place of GetNewTransactionId() */ void RecordKnownAssignedTransactionIds(TransactionId xid) { Assert(standbyState >= STANDBY_INITIALIZED); Assert(TransactionIdIsValid(xid)); Assert(TransactionIdIsValid(latestObservedXid)); elog(trace_recovery(DEBUG4), "record known xact %u latestObservedXid %u", xid, latestObservedXid); /* * When a newly observed xid arrives, it is frequently the case that it is * *not* the next xid in sequence. When this occurs, we must treat the * intervening xids as running also. */ if (TransactionIdFollows(xid, latestObservedXid)) { TransactionId next_expected_xid; /* * Extend subtrans like we do in GetNewTransactionId() during normal * operation using individual extend steps. Note that we do not need * to extend clog since its extensions are WAL logged. * * This part has to be done regardless of standbyState since we * immediately start assigning subtransactions to their toplevel * transactions. */ next_expected_xid = latestObservedXid; while (TransactionIdPrecedes(next_expected_xid, xid)) { TransactionIdAdvance(next_expected_xid); ExtendSUBTRANS(next_expected_xid); } Assert(next_expected_xid == xid); /* * If the KnownAssignedXids machinery isn't up yet, there's nothing * more to do since we don't track assigned xids yet. */ if (standbyState <= STANDBY_INITIALIZED) { latestObservedXid = xid; return; } /* * Add (latestObservedXid, xid] onto the KnownAssignedXids array. */ next_expected_xid = latestObservedXid; TransactionIdAdvance(next_expected_xid); KnownAssignedXidsAdd(next_expected_xid, xid, false); /* * Now we can advance latestObservedXid */ latestObservedXid = xid; /* ShmemVariableCache->nextXid must be beyond any observed xid */ AdvanceNextFullTransactionIdPastXid(latestObservedXid); next_expected_xid = latestObservedXid; TransactionIdAdvance(next_expected_xid); } } /* * ExpireTreeKnownAssignedTransactionIds * Remove the given XIDs from KnownAssignedXids. * * Called during recovery in analogy with and in place of ProcArrayEndTransaction() */ void ExpireTreeKnownAssignedTransactionIds(TransactionId xid, int nsubxids, TransactionId *subxids, TransactionId max_xid) { Assert(standbyState >= STANDBY_INITIALIZED); /* * Uses same locking as transaction commit */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); KnownAssignedXidsRemoveTree(xid, nsubxids, subxids); /* As in ProcArrayEndTransaction, advance latestCompletedXid */ MaintainLatestCompletedXidRecovery(max_xid); /* ... and xactCompletionCount */ ShmemVariableCache->xactCompletionCount++; LWLockRelease(ProcArrayLock); } /* * ExpireAllKnownAssignedTransactionIds * Remove all entries in KnownAssignedXids and reset lastOverflowedXid. */ void ExpireAllKnownAssignedTransactionIds(void) { LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); KnownAssignedXidsRemovePreceding(InvalidTransactionId); /* * Reset lastOverflowedXid. Currently, lastOverflowedXid has no use after * the call of this function. But do this for unification with what * ExpireOldKnownAssignedTransactionIds() do. 
*/ procArray->lastOverflowedXid = InvalidTransactionId; LWLockRelease(ProcArrayLock); } /* * ExpireOldKnownAssignedTransactionIds * Remove KnownAssignedXids entries preceding the given XID and * potentially reset lastOverflowedXid. */ void ExpireOldKnownAssignedTransactionIds(TransactionId xid) { LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); /* * Reset lastOverflowedXid if we know all transactions that have been * possibly running are being gone. Not doing so could cause an incorrect * lastOverflowedXid value, which makes extra snapshots be marked as * suboverflowed. */ if (TransactionIdPrecedes(procArray->lastOverflowedXid, xid)) procArray->lastOverflowedXid = InvalidTransactionId; KnownAssignedXidsRemovePreceding(xid); LWLockRelease(ProcArrayLock); } /* * Private module functions to manipulate KnownAssignedXids * * There are 5 main uses of the KnownAssignedXids data structure: * * * backends taking snapshots - all valid XIDs need to be copied out * * backends seeking to determine presence of a specific XID * * startup process adding new known-assigned XIDs * * startup process removing specific XIDs as transactions end * * startup process pruning array when special WAL records arrive * * This data structure is known to be a hot spot during Hot Standby, so we * go to some lengths to make these operations as efficient and as concurrent * as possible. * * The XIDs are stored in an array in sorted order --- TransactionIdPrecedes * order, to be exact --- to allow binary search for specific XIDs. Note: * in general TransactionIdPrecedes would not provide a total order, but * we know that the entries present at any instant should not extend across * a large enough fraction of XID space to wrap around (the primary would * shut down for fear of XID wrap long before that happens). So it's OK to * use TransactionIdPrecedes as a binary-search comparator. * * It's cheap to maintain the sortedness during insertions, since new known * XIDs are always reported in XID order; we just append them at the right. * * To keep individual deletions cheap, we need to allow gaps in the array. * This is implemented by marking array elements as valid or invalid using * the parallel boolean array KnownAssignedXidsValid[]. A deletion is done * by setting KnownAssignedXidsValid[i] to false, *without* clearing the * XID entry itself. This preserves the property that the XID entries are * sorted, so we can do binary searches easily. Periodically we compress * out the unused entries; that's much cheaper than having to compress the * array immediately on every deletion. * * The actually valid items in KnownAssignedXids[] and KnownAssignedXidsValid[] * are those with indexes tail <= i < head; items outside this subscript range * have unspecified contents. When head reaches the end of the array, we * force compression of unused entries rather than wrapping around, since * allowing wraparound would greatly complicate the search logic. We maintain * an explicit tail pointer so that pruning of old XIDs can be done without * immediately moving the array contents. In most cases only a small fraction * of the array contains valid entries at any instant. * * Although only the startup process can ever change the KnownAssignedXids * data structure, we still need interlocking so that standby backends will * not observe invalid intermediate states. The convention is that backends * must hold shared ProcArrayLock to examine the array. 
To remove XIDs from * the array, the startup process must hold ProcArrayLock exclusively, for * the usual transactional reasons (compare commit/abort of a transaction * during normal running). Compressing unused entries out of the array * likewise requires exclusive lock. To add XIDs to the array, we just insert * them into slots to the right of the head pointer and then advance the head * pointer. This wouldn't require any lock at all, except that on machines * with weak memory ordering we need to be careful that other processors * see the array element changes before they see the head pointer change. * We handle this by using a spinlock to protect reads and writes of the * head/tail pointers. (We could dispense with the spinlock if we were to * create suitable memory access barrier primitives and use those instead.) * The spinlock must be taken to read or write the head/tail pointers unless * the caller holds ProcArrayLock exclusively. * * Algorithmic analysis: * * If we have a maximum of M slots, with N XIDs currently spread across * S elements then we have N <= S <= M always. * * * Adding a new XID is O(1) and needs little locking (unless compression * must happen) * * Compressing the array is O(S) and requires exclusive lock * * Removing an XID is O(logS) and requires exclusive lock * * Taking a snapshot is O(S) and requires shared lock * * Checking for an XID is O(logS) and requires shared lock * * In comparison, using a hash table for KnownAssignedXids would mean that * taking snapshots would be O(M). If we can maintain S << M then the * sorted array technique will deliver significantly faster snapshots. * If we try to keep S too small then we will spend too much time compressing, * so there is an optimal point for any workload mix. We use a heuristic to * decide when to compress the array, though trimming also helps reduce * frequency of compressing. The heuristic requires us to track the number of * currently valid XIDs in the array. */ /* * Compress KnownAssignedXids by shifting valid data down to the start of the * array, removing any gaps. * * A compression step is forced if "force" is true, otherwise we do it * only if a heuristic indicates it's a good time to do it. * * Caller must hold ProcArrayLock in exclusive mode. */ static void KnownAssignedXidsCompress(bool force) { ProcArrayStruct *pArray = procArray; int head, tail; int compress_index; int i; /* no spinlock required since we hold ProcArrayLock exclusively */ head = pArray->headKnownAssignedXids; tail = pArray->tailKnownAssignedXids; if (!force) { /* * If we can choose how much to compress, use a heuristic to avoid * compressing too often or not often enough. * * Heuristic is if we have a large enough current spread and less than * 50% of the elements are currently in use, then compress. This * should ensure we compress fairly infrequently. We could compress * less often though the virtual array would spread out more and * snapshots would become more expensive. */ int nelements = head - tail; if (nelements < 4 * PROCARRAY_MAXPROCS || nelements < 2 * pArray->numKnownAssignedXids) return; } /* * We compress the array by reading the valid values from tail to head, * re-aligning data to 0th element. 
*/ compress_index = 0; for (i = tail; i < head; i++) { if (KnownAssignedXidsValid[i]) { KnownAssignedXids[compress_index] = KnownAssignedXids[i]; KnownAssignedXidsValid[compress_index] = true; compress_index++; } } pArray->tailKnownAssignedXids = 0; pArray->headKnownAssignedXids = compress_index; } /* * Add xids into KnownAssignedXids at the head of the array. * * xids from from_xid to to_xid, inclusive, are added to the array. * * If exclusive_lock is true then caller already holds ProcArrayLock in * exclusive mode, so we need no extra locking here. Else caller holds no * lock, so we need to be sure we maintain sufficient interlocks against * concurrent readers. (Only the startup process ever calls this, so no need * to worry about concurrent writers.) */ static void KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid, bool exclusive_lock) { ProcArrayStruct *pArray = procArray; TransactionId next_xid; int head, tail; int nxids; int i; Assert(TransactionIdPrecedesOrEquals(from_xid, to_xid)); /* * Calculate how many array slots we'll need. Normally this is cheap; in * the unusual case where the XIDs cross the wrap point, we do it the hard * way. */ if (to_xid >= from_xid) nxids = to_xid - from_xid + 1; else { nxids = 1; next_xid = from_xid; while (TransactionIdPrecedes(next_xid, to_xid)) { nxids++; TransactionIdAdvance(next_xid); } } /* * Since only the startup process modifies the head/tail pointers, we * don't need a lock to read them here. */ head = pArray->headKnownAssignedXids; tail = pArray->tailKnownAssignedXids; Assert(head >= 0 && head <= pArray->maxKnownAssignedXids); Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids); /* * Verify that insertions occur in TransactionId sequence. Note that even * if the last existing element is marked invalid, it must still have a * correctly sequenced XID value. */ if (head > tail && TransactionIdFollowsOrEquals(KnownAssignedXids[head - 1], from_xid)) { KnownAssignedXidsDisplay(LOG); elog(ERROR, "out-of-order XID insertion in KnownAssignedXids"); } /* * If our xids won't fit in the remaining space, compress out free space */ if (head + nxids > pArray->maxKnownAssignedXids) { /* must hold lock to compress */ if (!exclusive_lock) LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); KnownAssignedXidsCompress(true); head = pArray->headKnownAssignedXids; /* note: we no longer care about the tail pointer */ if (!exclusive_lock) LWLockRelease(ProcArrayLock); /* * If it still won't fit then we're out of memory */ if (head + nxids > pArray->maxKnownAssignedXids) elog(ERROR, "too many KnownAssignedXids"); } /* Now we can insert the xids into the space starting at head */ next_xid = from_xid; for (i = 0; i < nxids; i++) { KnownAssignedXids[head] = next_xid; KnownAssignedXidsValid[head] = true; TransactionIdAdvance(next_xid); head++; } /* Adjust count of number of valid entries */ pArray->numKnownAssignedXids += nxids; /* * Now update the head pointer. We use a spinlock to protect this * pointer, not because the update is likely to be non-atomic, but to * ensure that other processors see the above array updates before they * see the head pointer change. * * If we're holding ProcArrayLock exclusively, there's no need to take the * spinlock. 
*/ if (exclusive_lock) pArray->headKnownAssignedXids = head; else { SpinLockAcquire(&pArray->known_assigned_xids_lck); pArray->headKnownAssignedXids = head; SpinLockRelease(&pArray->known_assigned_xids_lck); } } /* * KnownAssignedXidsSearch * * Searches KnownAssignedXids for a specific xid and optionally removes it. * Returns true if it was found, false if not. * * Caller must hold ProcArrayLock in shared or exclusive mode. * Exclusive lock must be held for remove = true. */ static bool KnownAssignedXidsSearch(TransactionId xid, bool remove) { ProcArrayStruct *pArray = procArray; int first, last; int head; int tail; int result_index = -1; if (remove) { /* we hold ProcArrayLock exclusively, so no need for spinlock */ tail = pArray->tailKnownAssignedXids; head = pArray->headKnownAssignedXids; } else { /* take spinlock to ensure we see up-to-date array contents */ SpinLockAcquire(&pArray->known_assigned_xids_lck); tail = pArray->tailKnownAssignedXids; head = pArray->headKnownAssignedXids; SpinLockRelease(&pArray->known_assigned_xids_lck); } /* * Standard binary search. Note we can ignore the KnownAssignedXidsValid * array here, since even invalid entries will contain sorted XIDs. */ first = tail; last = head - 1; while (first <= last) { int mid_index; TransactionId mid_xid; mid_index = (first + last) / 2; mid_xid = KnownAssignedXids[mid_index]; if (xid == mid_xid) { result_index = mid_index; break; } else if (TransactionIdPrecedes(xid, mid_xid)) last = mid_index - 1; else first = mid_index + 1; } if (result_index < 0) return false; /* not in array */ if (!KnownAssignedXidsValid[result_index]) return false; /* in array, but invalid */ if (remove) { KnownAssignedXidsValid[result_index] = false; pArray->numKnownAssignedXids--; Assert(pArray->numKnownAssignedXids >= 0); /* * If we're removing the tail element then advance tail pointer over * any invalid elements. This will speed future searches. */ if (result_index == tail) { tail++; while (tail < head && !KnownAssignedXidsValid[tail]) tail++; if (tail >= head) { /* Array is empty, so we can reset both pointers */ pArray->headKnownAssignedXids = 0; pArray->tailKnownAssignedXids = 0; } else { pArray->tailKnownAssignedXids = tail; } } } return true; } /* * Is the specified XID present in KnownAssignedXids[]? * * Caller must hold ProcArrayLock in shared or exclusive mode. */ static bool KnownAssignedXidExists(TransactionId xid) { Assert(TransactionIdIsValid(xid)); return KnownAssignedXidsSearch(xid, false); } /* * Remove the specified XID from KnownAssignedXids[]. * * Caller must hold ProcArrayLock in exclusive mode. */ static void KnownAssignedXidsRemove(TransactionId xid) { Assert(TransactionIdIsValid(xid)); elog(trace_recovery(DEBUG4), "remove KnownAssignedXid %u", xid); /* * Note: we cannot consider it an error to remove an XID that's not * present. We intentionally remove subxact IDs while processing * XLOG_XACT_ASSIGNMENT, to avoid array overflow. Then those XIDs will be * removed again when the top-level xact commits or aborts. * * It might be possible to track such XIDs to distinguish this case from * actual errors, but it would be complicated and probably not worth it. * So, just ignore the search result. */ (void) KnownAssignedXidsSearch(xid, true); } /* * KnownAssignedXidsRemoveTree * Remove xid (if it's not InvalidTransactionId) and all the subxids. * * Caller must hold ProcArrayLock in exclusive mode. 
*/ static void KnownAssignedXidsRemoveTree(TransactionId xid, int nsubxids, TransactionId *subxids) { int i; if (TransactionIdIsValid(xid)) KnownAssignedXidsRemove(xid); for (i = 0; i < nsubxids; i++) KnownAssignedXidsRemove(subxids[i]); /* Opportunistically compress the array */ KnownAssignedXidsCompress(false); } /* * Prune KnownAssignedXids up to, but *not* including xid. If xid is invalid * then clear the whole table. * * Caller must hold ProcArrayLock in exclusive mode. */ static void KnownAssignedXidsRemovePreceding(TransactionId removeXid) { ProcArrayStruct *pArray = procArray; int count = 0; int head, tail, i; if (!TransactionIdIsValid(removeXid)) { elog(trace_recovery(DEBUG4), "removing all KnownAssignedXids"); pArray->numKnownAssignedXids = 0; pArray->headKnownAssignedXids = pArray->tailKnownAssignedXids = 0; return; } elog(trace_recovery(DEBUG4), "prune KnownAssignedXids to %u", removeXid); /* * Mark entries invalid starting at the tail. Since array is sorted, we * can stop as soon as we reach an entry >= removeXid. */ tail = pArray->tailKnownAssignedXids; head = pArray->headKnownAssignedXids; for (i = tail; i < head; i++) { if (KnownAssignedXidsValid[i]) { TransactionId knownXid = KnownAssignedXids[i]; if (TransactionIdFollowsOrEquals(knownXid, removeXid)) break; if (!StandbyTransactionIdIsPrepared(knownXid)) { KnownAssignedXidsValid[i] = false; count++; } } } pArray->numKnownAssignedXids -= count; Assert(pArray->numKnownAssignedXids >= 0); /* * Advance the tail pointer if we've marked the tail item invalid. */ for (i = tail; i < head; i++) { if (KnownAssignedXidsValid[i]) break; } if (i >= head) { /* Array is empty, so we can reset both pointers */ pArray->headKnownAssignedXids = 0; pArray->tailKnownAssignedXids = 0; } else { pArray->tailKnownAssignedXids = i; } /* Opportunistically compress the array */ KnownAssignedXidsCompress(false); } /* * KnownAssignedXidsGet - Get an array of xids by scanning KnownAssignedXids. * We filter out anything >= xmax. * * Returns the number of XIDs stored into xarray[]. Caller is responsible * that array is large enough. * * Caller must hold ProcArrayLock in (at least) shared mode. */ static int KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax) { TransactionId xtmp = InvalidTransactionId; return KnownAssignedXidsGetAndSetXmin(xarray, &xtmp, xmax); } /* * KnownAssignedXidsGetAndSetXmin - as KnownAssignedXidsGet, plus * we reduce *xmin to the lowest xid value seen if not already lower. * * Caller must hold ProcArrayLock in (at least) shared mode. */ static int KnownAssignedXidsGetAndSetXmin(TransactionId *xarray, TransactionId *xmin, TransactionId xmax) { int count = 0; int head, tail; int i; /* * Fetch head just once, since it may change while we loop. We can stop * once we reach the initially seen head, since we are certain that an xid * cannot enter and then leave the array while we hold ProcArrayLock. We * might miss newly-added xids, but they should be >= xmax so irrelevant * anyway. * * Must take spinlock to ensure we see up-to-date array contents. */ SpinLockAcquire(&procArray->known_assigned_xids_lck); tail = procArray->tailKnownAssignedXids; head = procArray->headKnownAssignedXids; SpinLockRelease(&procArray->known_assigned_xids_lck); for (i = tail; i < head; i++) { /* Skip any gaps in the array */ if (KnownAssignedXidsValid[i]) { TransactionId knownXid = KnownAssignedXids[i]; /* * Update xmin if required. Only the first XID need be checked, * since the array is sorted. 
*/ if (count == 0 && TransactionIdPrecedes(knownXid, *xmin)) *xmin = knownXid; /* * Filter out anything >= xmax, again relying on sorted property * of array. */ if (TransactionIdIsValid(xmax) && TransactionIdFollowsOrEquals(knownXid, xmax)) break; /* Add knownXid into output array */ xarray[count++] = knownXid; } } return count; } /* * Get oldest XID in the KnownAssignedXids array, or InvalidTransactionId * if nothing there. */ static TransactionId KnownAssignedXidsGetOldestXmin(void) { int head, tail; int i; /* * Fetch head just once, since it may change while we loop. */ SpinLockAcquire(&procArray->known_assigned_xids_lck); tail = procArray->tailKnownAssignedXids; head = procArray->headKnownAssignedXids; SpinLockRelease(&procArray->known_assigned_xids_lck); for (i = tail; i < head; i++) { /* Skip any gaps in the array */ if (KnownAssignedXidsValid[i]) return KnownAssignedXids[i]; } return InvalidTransactionId; } /* * Display KnownAssignedXids to provide debug trail * * Currently this is only called within startup process, so we need no * special locking. * * Note this is pretty expensive, and much of the expense will be incurred * even if the elog message will get discarded. It's not currently called * in any performance-critical places, however, so no need to be tenser. */ static void KnownAssignedXidsDisplay(int trace_level) { ProcArrayStruct *pArray = procArray; StringInfoData buf; int head, tail, i; int nxids = 0; tail = pArray->tailKnownAssignedXids; head = pArray->headKnownAssignedXids; initStringInfo(&buf); for (i = tail; i < head; i++) { if (KnownAssignedXidsValid[i]) { nxids++; appendStringInfo(&buf, "[%d]=%u ", i, KnownAssignedXids[i]); } } elog(trace_level, "%d KnownAssignedXids (num=%d tail=%d head=%d) %s", nxids, pArray->numKnownAssignedXids, pArray->tailKnownAssignedXids, pArray->headKnownAssignedXids, buf.data); pfree(buf.data); } /* * KnownAssignedXidsReset * Resets KnownAssignedXids to be empty */ static void KnownAssignedXidsReset(void) { ProcArrayStruct *pArray = procArray; LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); pArray->numKnownAssignedXids = 0; pArray->tailKnownAssignedXids = 0; pArray->headKnownAssignedXids = 0; LWLockRelease(ProcArrayLock); }
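# ------------------------------------------------------------------
# Illustrative sketch (not part of the PostgreSQL source above): the
# KnownAssignedXids comments describe a sorted array with a parallel
# validity array, lazy deletion, and periodic compression. A minimal
# Python model of that idea, ignoring locking, memory barriers, and
# XID wraparound, might look like this:
# ------------------------------------------------------------------
import bisect

class KnownAssignedXidsModel:
    def __init__(self, max_size):
        self.xids = [0] * max_size       # sorted XIDs; gaps allowed
        self.valid = [False] * max_size  # parallel validity flags
        self.head = 0                    # next free slot
        self.tail = 0                    # first potentially valid slot
        self.num_valid = 0

    def add(self, xid):
        # New XIDs always arrive in increasing order, so appending at
        # head keeps the array sorted; compress first if it is full.
        if self.head == len(self.xids):
            self.compress()
        if self.head == len(self.xids):
            raise MemoryError('too many KnownAssignedXids')
        self.xids[self.head] = xid
        self.valid[self.head] = True
        self.head += 1
        self.num_valid += 1

    def remove(self, xid):
        # O(log S) binary search over [tail, head); invalid entries
        # still hold sorted XIDs, so the search can ignore the flags.
        i = bisect.bisect_left(self.xids, xid, self.tail, self.head)
        if i < self.head and self.xids[i] == xid and self.valid[i]:
            self.valid[i] = False  # lazy delete: flip the flag only
            self.num_valid -= 1

    def compress(self):
        # Shift valid entries down to index 0, dropping the gaps,
        # mirroring KnownAssignedXidsCompress().
        j = 0
        for i in range(self.tail, self.head):
            if self.valid[i]:
                self.xids[j] = self.xids[i]
                self.valid[j] = True
                j += 1
        for i in range(j, self.head):
            self.valid[i] = False
        self.tail, self.head = 0, j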
//*******************************************************************
/*!
\file   I2Cmaster.h
\author Thomas Breuer (Bonn-Rhein-Sieg University of Applied Sciences)
\date   23.03.2016

This file is released under the MIT License.
*/
//*******************************************************************
#ifndef _HW_I2C_MASTER_H
#define _HW_I2C_MASTER_H

//*******************************************************************
/*!
\class cHwI2Cmaster
\brief Abstract class supporting the I2C hardware in master mode

I2C is a serial communication interface. This base class handles
data transmit and receive actions. \n
\remark Interrupts are disabled during data transfer

Derived classes should at least:
- initialize the I2C hardware
- implement \a start() and \a stop() to control transmission
- implement \a sendAddr(), \a writeByte() and \a readByteNack()
  to read/write a data byte from/to hardware

\example cHwI2Cmaster.cpp
*/
class cHwI2Cmaster
{
  public:
    //---------------------------------------------------------------
    /*! I2C clock rate
    */
    typedef enum
    {
      CR_10kHz = 0, //!< clock rate =   10 kHz (SCL)
      CR_100kHz,    //!< clock rate =  100 kHz (SCL)
      CR_400kHz,    //!< clock rate =  400 kHz (SCL)
      CR_1000kHz    //!< clock rate = 1000 kHz (SCL)
    } MODE;

  protected:
    //---------------------------------------------------------------
    // Standard constructor
    //
    cHwI2Cmaster( BYTE maskIntrIn = true );

  public:
    //---------------------------------------------------------------
    /*! \class Device
        \brief A single slave device connected to the I2C bus
    */
    //---------------------------------------------------------------
    class Device
    {
      public:
        //-----------------------------------------------------------
        /*! Construct a handle to a slave device
        */
        //-----------------------------------------------------------
        Device( cHwI2Cmaster &i2cIn,   //!< Reference to I2C hardware
                BYTE          hwAddrIn //!< Device's hardware address
              )
        : i2c( i2cIn )
        {
          hwAddr      = hwAddrIn;
          isErrorFlag = false;
        }

        //---------------------------------------------------------------
        /*! Read a data byte from the device
            \return Received data byte
        */
        virtual BYTE read( void );

        //---------------------------------------------------------------
        /*! Write an address to the device and read back a data byte
            \param addr Memory or register address of the device
            \return Received data byte
        */
        virtual BYTE read( BYTE addr );

        //---------------------------------------------------------------
        /*! Write an address to the device and read back a data byte
            \param addr Memory or register address of the device
            \return Received data byte
        */
        virtual BYTE read( WORD addr );

        //---------------------------------------------------------------
        /*! Read a data array from the device
            \param data Pointer to data array
            \param size Size of data array
        */
        virtual void read( BYTE *data, BYTE size );

        //---------------------------------------------------------------
        /*! Write an address to the device and read back a data array
            \param addr Memory or register address of the device
            \param data Pointer to data array
            \param size Size of data array
        */
        virtual void read( BYTE addr, BYTE *data, BYTE size );

        //---------------------------------------------------------------
        /*! Write an address to the device and read back a data array
            \param addr Memory or register address of the device
            \param data Pointer to data array
            \param size Size of data array
        */
        virtual void read( WORD addr, BYTE *data, BYTE size );

        //---------------------------------------------------------------
        /*! Write a byte to the device
        */
        virtual void write( BYTE data //!< Transmitted data byte
                          );

        //---------------------------------------------------------------
        /*! Write first an address and second a data byte
        */
        virtual void write( BYTE addr, //!< Memory or register address of the device
                            BYTE data  //!< Transmitted data byte
                          );

        //---------------------------------------------------------------
        /*! Write first an address and second a data byte
        */
        virtual void write( WORD addr, //!< Memory or register address of the device
                            BYTE data  //!< Transmitted data byte
                          );

        //---------------------------------------------------------------
        /*! Write a data array to the device
        */
        virtual void write( BYTE *data, //!< Pointer to transmitted data array
                            BYTE  size  //!< Size of data array
                          );

        //---------------------------------------------------------------
        /*! Write first an address and second a data array
        */
        virtual void write( BYTE  addr, //!< Memory or register address of the device
                            BYTE *data, //!< Pointer to transmitted data array
                            BYTE  size  //!< Size of data array
                          );

        //---------------------------------------------------------------
        /*! Write first an address and second a data array
        */
        virtual void write( WORD  addr, //!< Memory or register address of the device
                            BYTE *data, //!< Pointer to transmitted data array
                            BYTE  size  //!< Size of data array
                          );

        //---------------------------------------------------------------
        /*! Check if an error occurred

            Returns the internal error flag. This flag is reset by the
            next call of \a read() or \a write()
            \return - true:  Error, previous transfer was aborted
                    - false: No error
        */
        virtual BYTE isError( void );

        cHwI2Cmaster &i2c;
        BYTE          hwAddr;
        BYTE          isErrorFlag;
    };

  protected:
    //---------------------------------------------------------------
    BYTE isHardwareOK;
    BYTE maskIntr;

  protected:
    //---------------------------------------------------------------
    virtual BYTE write( BYTE  hwAddr,
                        BYTE *adr,
                        BYTE  sizeAdr,
                        BYTE *data,
                        BYTE  sizeData );

    //---------------------------------------------------------------
    virtual BYTE read( BYTE  hwAddr,
                       BYTE *adr,
                       BYTE  sizeAdr,
                       BYTE *data,
                       BYTE  sizeData );

  private:
    //---------------------------------------------------------------
    // Start an I2C transfer.
    // Send start condition and wait until ready
    //
    virtual void start( void ) = 0;

    //---------------------------------------------------------------
    // Stop an I2C transfer.
    // Send stop condition
    //
    virtual void stop( void ) = 0;

    //---------------------------------------------------------------
    // Send hwAddr (slave) with added R/W flag, wait for acknowledge
    // hwAddr: Device's hardware address
    //         (master write: LSB = 0, master read: LSB = 1)
    //
    virtual void sendAddr( BYTE hwAddr ) = 0;

    //---------------------------------------------------------------
    // Send data byte and wait for acknowledge
    // data: Transmitted data byte
    //
    virtual void writeByte( BYTE data ) = 0;

    //---------------------------------------------------------------
    // Wait for a received data byte, answer with acknowledge
    // (further bytes will follow) and return it
    // return: Received data byte
    //
    virtual BYTE readByteAck( void ) = 0;

    //---------------------------------------------------------------
    // Wait for a received data byte, answer with no-acknowledge
    // (last byte of the transfer) and return it
    // return: Received data byte
    //
    virtual BYTE readByteNack( void ) = 0;

}; //cHwI2Cmaster

#endif
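# ------------------------------------------------------------------
# Illustrative sketch (not part of the header above): the Device
# read(addr) methods wrap a standard I2C master transaction built
# from the virtual primitives of cHwI2Cmaster. A rough Python model
# of a one-byte register read, using a made-up FakeBus stand-in for
# those primitives, might look like this:
# ------------------------------------------------------------------
class FakeBus:
    """Stand-in for the cHwI2Cmaster primitives; records the sequence."""
    def __init__(self):
        self.log = []
    def start(self):
        self.log.append('START')
    def stop(self):
        self.log.append('STOP')
    def send_addr(self, addr):
        self.log.append('ADDR 0x%02X' % addr)
    def write_byte(self, byte):
        self.log.append('WRITE 0x%02X' % byte)
    def read_byte_nack(self):
        self.log.append('READ+NACK')
        return 0x5A  # dummy payload

def i2c_read_register(bus, hw_addr, reg_addr):
    """One-byte register read: write phase, repeated start, read phase."""
    bus.start()
    bus.send_addr(hw_addr & 0xFE)  # slave address, LSB = 0 -> master write
    bus.write_byte(reg_addr)       # select the register to read
    bus.start()                    # repeated start switches direction
    bus.send_addr(hw_addr | 0x01)  # slave address, LSB = 1 -> master read
    data = bus.read_byte_nack()    # last byte is answered with NACK
    bus.stop()
    return data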
import React from 'react';
import { Route, IndexRoute } from 'react-router';

import App from './components/app';
import PostsIndex from './components/posts_index';
import PostsNew from './components/posts_new';
import PostsShow from './components/post_show';

export default (
  <Route path="/" component={App}>
    <IndexRoute component={PostsIndex} />
    <Route path="posts/new" component={PostsNew} />
    <Route path="posts/:post_id" component={PostsShow} />
  </Route>
);

// :post_id => this.props.params.post_id
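# ------------------------------------------------------------------
# Illustrative sketch (not part of the route file above): a `:post_id`
# segment is a path parameter that react-router exposes as
# this.props.params.post_id. The same pattern-to-params idea, shown
# standalone in Python for illustration:
# ------------------------------------------------------------------
import re

def match_route(pattern, path):
    # 'posts/:post_id' -> r'posts/(?P<post_id>[^/]+)'
    regex = re.sub(r':(\w+)', r'(?P<\1>[^/]+)', pattern)
    m = re.fullmatch(regex, path)
    return m.groupdict() if m else None

assert match_route('posts/:post_id', 'posts/42') == {'post_id': '42'}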
/* globals Router Credentials Projects Session Settings */

Router.route('/projects/:id/credentials', {
  name: 'credentials',
  controller: 'ProjectController',
  onRun: function () {
    if (Settings.findOne({ setting: 'persistViewFilters', enabled: true })) {
      this.next()
      return
    }
    Session.set('credentialsSearch', null)
    this.next()
  },
  data: function () {
    var project = Projects.findOne({ _id: this.params.id })
    if (!project) {
      return null
    }
    var query = { projectId: this.params.id }
    var search = Session.get('credentialsSearch')
    if (search) {
      query.$or = [
        { username: { $regex: search, $options: 'i' } },
        { password: { $regex: search, $options: 'i' } },
        { format: { $regex: search, $options: 'i' } },
        { hash: { $regex: search, $options: 'i' } },
        { host: { $regex: search, $options: 'i' } },
        { service: { $regex: search, $options: 'i' } }
      ]
    }
    var self = this
    return {
      projectId: self.params.id,
      projectName: project.name,
      credentials: Credentials.find(query).fetch(),
      savedSearch: search,
      total: Credentials.find({ projectId: self.params.id }).count()
    }
  }
})

Router.route('/projects/:id/credentials/new', {
  name: 'newCredential',
  controller: 'ProjectController',
  data: function () {
    var project = Projects.findOne({ _id: this.params.id })
    if (!project) {
      return null
    }
    var self = this
    return {
      projectId: self.params.id,
      projectName: project.name
    }
  }
})

Router.route('/projects/:id/credentials/bulk', {
  name: 'newCredentialBulk',
  controller: 'ProjectController',
  data: function () {
    var project = Projects.findOne({ _id: this.params.id })
    if (!project) {
      return null
    }
    var self = this
    return {
      projectId: self.params.id,
      projectName: project.name
    }
  }
})

Router.route('/projects/:id/credentials/:cid', {
  name: 'editCredential',
  controller: 'ProjectController',
  data: function () {
    var project = Projects.findOne({ _id: this.params.id })
    if (!project) {
      return null
    }
    var credential = Credentials.findOne({ _id: this.params.cid })
    if (!credential) {
      return null
    }
    var self = this
    return {
      projectId: self.params.id,
      projectName: project.name,
      credential: credential
    }
  }
})
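# ------------------------------------------------------------------
# Illustrative sketch (not part of the router file above): the
# credentials route builds one case-insensitive $regex clause per
# searchable field and combines them with $or. The same query shape,
# sketched in Python (e.g. for pymongo); `search` is a user-supplied
# filter string:
# ------------------------------------------------------------------
SEARCHABLE_FIELDS = ['username', 'password', 'format', 'hash', 'host', 'service']

def credentials_query(project_id, search=None):
    query = {'projectId': project_id}
    if search:
        query['$or'] = [
            {field: {'$regex': search, '$options': 'i'}}
            for field in SEARCHABLE_FIELDS
        ]
    return query

# credentials_query('p1', 'admin')
# -> {'projectId': 'p1', '$or': [{'username': {'$regex': 'admin',
#    '$options': 'i'}}, ... one clause per field ...]}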
import jinja2
import jinja2.nodes
from jinja2.ext import Extension

from .templatetags.wagtailcore_tags import pageurl, richtext, slugurl, wagtail_version


class WagtailCoreExtension(Extension):
    tags = {'include_block'}

    def __init__(self, environment):
        super().__init__(environment)

        self.environment.globals.update({
            'pageurl': jinja2.contextfunction(pageurl),
            'slugurl': jinja2.contextfunction(slugurl),
            'wagtail_version': wagtail_version,
        })
        self.environment.filters.update({
            'richtext': richtext,
        })

    def parse(self, parser):
        parse_method = getattr(self, 'parse_' + parser.stream.current.value)

        return parse_method(parser)

    def parse_include_block(self, parser):
        lineno = next(parser.stream).lineno

        args = [parser.parse_expression()]

        with_context = True
        if parser.stream.current.test_any('name:with', 'name:without') and parser.stream.look().test('name:context'):
            with_context = next(parser.stream).value == 'with'
            parser.stream.skip()

        if with_context:
            args.append(jinja2.nodes.ContextReference())
        else:
            # We could skip this else branch entirely, since the context
            # argument defaults to None; passing it keeps the call uniform.
            args.append(jinja2.nodes.Const(None))

        node = self.call_method('_include_block', args, lineno=lineno)
        return jinja2.nodes.Output([node], lineno=lineno)

    def _include_block(self, value, context=None):
        if hasattr(value, 'render_as_block'):
            if context:
                new_context = context.get_all()
            else:
                new_context = {}

            return jinja2.Markup(value.render_as_block(context=new_context))

        return jinja2.Markup(value)


# Nicer import names
core = WagtailCoreExtension
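# ------------------------------------------------------------------
# Illustrative sketch (not part of the extension above): in templates
# the tag reads `{% include_block my_block %}`, optionally followed by
# `with context` or `without context`. The parser above peeks at the
# next two tokens to decide; the same decision on a plain token list
# (a standalone stand-in for jinja2's token stream) looks like this:
# ------------------------------------------------------------------
def wants_context(tail_tokens):
    """True if the rendered block should receive the template context."""
    if tail_tokens[-2:] in (['with', 'context'], ['without', 'context']):
        return tail_tokens[-2] == 'with'
    return True  # like the real tag, default to passing context

assert wants_context(['my_block']) is True
assert wants_context(['my_block', 'with', 'context']) is True
assert wants_context(['my_block', 'without', 'context']) is False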
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import re

TIP_TRIGGER = '[tip'
TIP_START_TAG_PATTERN = re.compile(r'(\[tip( type=\"(.*?)\")?\])', re.MULTILINE)
TIP_END_TAG_PATTERN = '[/tip]'
ALLOWED_TYPES = ['default', 'note', 'important', 'read-on']


def trigger(original_body, content):
    if TIP_TRIGGER in original_body:
        return _transform(content)
    return content


def _transform(content):
    for match in TIP_START_TAG_PATTERN.findall(content):
        # For tips without a type
        if not match[1]:
            content = content.replace(
                match[0], '{% call tip(\'\', type=\'note\') %}')
        if match[2]:
            type = 'default' if match[2] not in ALLOWED_TYPES else match[2]
            content = content.replace(
                match[0], '{% call tip(\'\', type=\'' + type + '\') %}')
    # Then also replace end tags
    content = content.replace(TIP_END_TAG_PATTERN, '{% endcall %}')
    return content
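# ------------------------------------------------------------------
# Illustrative demo (not part of the module above): what _transform
# does to a small input. Untyped tips default to 'note'; types not in
# ALLOWED_TYPES fall back to 'default'; end tags become {% endcall %}.
# ------------------------------------------------------------------
if __name__ == '__main__':
    sample = '[tip type="important"]Check twice.[/tip]\n[tip]Plain hint.[/tip]'
    print(_transform(sample))
    # {% call tip('', type='important') %}Check twice.{% endcall %}
    # {% call tip('', type='note') %}Plain hint.{% endcall %}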
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license. """ import re import setuptools # Module dependencies requirements, dependency_links = [], [] with open('requirements.txt') as f: for line in f.read().splitlines(): requirements.append(line) with open('label_studio_sdk/__init__.py') as f: version = re.search("__version__ ?= ?'(.*?)'", f.read()).group(1) setuptools.setup( name='label-studio-sdk', version=version, author='Heartex', author_email="hello@heartex.ai", description='Label Studio annotation tool', long_description='Label Studio Python SDK', long_description_content_type='text/markdown', url='https://github.com/heartexlabs/label-studio-sdk', packages=setuptools.find_packages(), include_package_data=True, classifiers=[ 'Programming Language :: Python :: 3', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', ], python_requires='>=3.6', install_requires=requirements )
from django.contrib import admin

from .models import Pessoas


class VerPessoas(admin.ModelAdmin):
    list_display = ('id', 'nome', 'email')
    list_display_links = ('id', 'nome',)


admin.site.register(Pessoas, VerPessoas)
# Copyright 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import sys from unittest.mock import MagicMock class Empty(object): pass class HasOutputCols(object): pass class Params(object): @staticmethod def _dummy(): return MagicMock() MOCK_MODULES = [ 'cloudpickle', 'ctypes', 'h5py', 'psutil', 'fsspec', 'fsspec.core', 'fsspec.utils', 'pyarrow', 'pyarrow.parquet', 'numpy', 'numpy.core.multiarray', 'numpy.dtype', 'pyspark', 'pyspark.ml', 'pyspark.ml.linalg', 'pyspark.ml.param', 'pyspark.ml.param.shared', 'pyspark.ml.util', 'pyspark.sql', 'pyspark.sql.functions', 'pyspark.sql.types', 'ray', 'ray.exceptions', 'ray.services', 'ray.util', 'ray.util.placement_group', 'tensorflow', 'tensorflow.python', 'tensorflow.python.framework', 'tensorflow.python.platform', 'tensorflow.python.eager', 'tensorflow.python.keras', 'keras', 'keras.backend', 'torch', 'torch.autograd.function', 'torch.nn.functional', 'torch.nn.modules.batchnorm', 'torch.utils', 'torch.utils.data', 'torch.utils.data.distributed', 'torch.utils.tensorboard', 'torch.distributed', 'pytorch_lightning', 'pytorch_lightning.callbacks', 'pytorch_lightning.loggers', 'pytorch_lightning.utilities', 'pytorch_lightning.utilities.model_helpers', 'mxnet', 'mxnet.base', 'horovod.common.util', 'horovod.torch.mpi_lib_v2', ] MOCK_TREE = { 'tensorflow': { '__version__': '1.14.0', 'train': { 'Optimizer': MagicMock, 'SessionRunHook': MagicMock, }, 'estimator': { 'SessionRunHook': MagicMock, }, 'keras': { 'callbacks': { 'Callback': MagicMock, }, }, }, 'keras': { 'callbacks': { 'Callback': MagicMock, }, }, 'torch': { '__version__': '1.0.0', 'nn': { 'modules': { 'batchnorm': { '_BatchNorm': MagicMock, } }, }, 'utils': { 'data': { 'Sampler': MagicMock, }, }, }, 'pyspark': { 'ml': { 'Estimator': Empty, 'Model': Empty, 'param': { 'shared': { 'HasOutputCols': HasOutputCols, 'Param': MagicMock, 'Params': Params, 'TypeConverters': MagicMock(), }, }, 'util': { 'MLReadable': Empty, 'MLWritable': Empty, } }, }, 'horovod': { 'common': { 'util': { 'get_ext_suffix': lambda: 'xyz', }, }, 'spark': { 'keras': { 'estimator': { 'KerasEstimatorParamsReadable': MagicMock, 'KerasEstimatorParamsWritable': MagicMock, }, }, 'torch': { 'estimator': { 'TorchEstimatorParamsReadable': MagicMock, 'TorchEstimatorParamsWritable': MagicMock, }, }, }, }, } def gen_mock_package(path): if type(path) == str: path = path.split('.') class TreeMock(MagicMock): @classmethod def __getattr__(cls, name): full_path = path + [name] tree_ptr = MOCK_TREE for path_part in full_path: if path_part in tree_ptr: if type(tree_ptr[path_part]) != dict: return tree_ptr[path_part] else: tree_ptr = tree_ptr[path_part] else: return MagicMock() return gen_mock_package(full_path) return TreeMock() def instrument(): sys.modules.update((mod_name, gen_mock_package(mod_name)) for mod_name in MOCK_MODULES)
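# ------------------------------------------------------------------
# Illustrative demo (not part of the module above): after
# instrument(), imports of the mocked packages resolve against
# MOCK_TREE, and anything not listed there falls back to a plain
# MagicMock. The intended use is a docs build (e.g. a Sphinx conf.py)
# where the heavy dependencies are not actually installed:
# ------------------------------------------------------------------
if __name__ == '__main__':
    instrument()
    import tensorflow
    print(tensorflow.__version__)             # '1.14.0', served from MOCK_TREE
    print(tensorflow.train.Optimizer)         # MagicMock class from the tree
    print(tensorflow.no_such_submodule.attr)  # plain MagicMock fallback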
// standard
import aLaMode from 'alamode'
import ALaImport from '@a-la/import'
import App from "koa"
import { methodA, methodB } from '@a-la/named-import'
/* expected */
let aLaMode = require('alamode'); if (aLaMode && aLaMode.__esModule) aLaMode = aLaMode.default;
let ALaImport = require('@a-la/import'); if (ALaImport && ALaImport.__esModule) ALaImport = ALaImport.default;
let App = require("koa"); if (App && App.__esModule) App = App.default;
const { methodA, methodB } = require('@a-la/named-import');
/**/

// default and named
import def, { methodA, methodB } from 'test'
/* expected */
let def = require('test'); const { methodA, methodB } = def; if (def && def.__esModule) def = def.default;
/**/

// import all as
import * as test from 'test'
/* expected */
const test = require('test');
/**/

// ignores strings
`import * as test from 'test'`
/* expected */
`import * as test from 'test'`
/**/
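# ------------------------------------------------------------------
# Illustrative sketch (not part of the fixture above): the expected
# output interops with ES modules by unwrapping `.default` when
# `__esModule` is set. A toy Python re-implementation of just the
# simplest rule (`import X from 'mod'`), ignoring strings, named
# imports, and the other cases @a-la/import actually handles:
# ------------------------------------------------------------------
import re

DEFAULT_IMPORT = re.compile(r"^import (\w+) from (['\"].+?['\"])$", re.M)

def transform_default_imports(source):
    repl = (r"let \1 = require(\2); "
            r"if (\1 && \1.__esModule) \1 = \1.default;")
    return DEFAULT_IMPORT.sub(repl, source)

# transform_default_imports("import aLaMode from 'alamode'")
# -> "let aLaMode = require('alamode'); if (aLaMode && aLaMode.__esModule) aLaMode = aLaMode.default;"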
#!/usr/bin/env python

# Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script helps synchronize contents from their respective sources of
# truth (usually GitHub repositories of each Tekton component, such as
# tektoncd/pipelines) to tektoncd/website.

import copy
import fnmatch
import json
import logging
import markdown
from multiprocessing import Pool
import os
import os.path
import re
import sys
from urllib.error import URLError
from urllib.parse import urlparse, urljoin, urlunparse

from bs4 import BeautifulSoup
import click
import git
from jinja2 import Environment
from jinja2 import FileSystemLoader
from ruamel.yaml import YAML

CONTENT_DIR = './content/en/docs'
VAULT_DIR = './content/en/vault'
JS_ASSET_DIR = './assets/js'
TEMPLATE_DIR = './templates'
BASE_FOLDER = os.path.dirname(os.path.abspath(__file__))
DEFAULT_CONFIG_FOLDER = os.path.join(BASE_FOLDER, 'config')
DEFAULT_CACHE_FOLDER = os.path.join(BASE_FOLDER, '.cache')

jinja_env = Environment(loader=FileSystemLoader(TEMPLATE_DIR))

FM_BOUNDARY = re.compile(r"^(?:<!--\n)?-{3,}\s*$(?:\n-->)?", re.MULTILINE)
YAML_SEPARATOR = "---\n"
FOLDER_INDEX = '_index.md'


def doc_config(doc, folder_config, weight=None):
    """ Return the target name, folder and header for doc based on folder_config

    :param doc: the doc as a gitpython Blob
    :param folder_config: a dict with the configuration of the folder the doc
      was found in, as specified in the sync config file under `folders`
    :param weight: optional weight of the doc. When specified it's set in the
      returned header dict
    :returns: a tuple (target_filename, target_folder, header), which describes
      which file `doc` should be written to, in which folder, with which header
    """
    index_file = folder_config.get('index', FOLDER_INDEX)
    target_folder = folder_config.get('target', '')
    # If the doc name is configured as index, rewrite it to FOLDER_INDEX
    target_filename = FOLDER_INDEX if doc.name == index_file else doc.name
    # If a header is specified, build it and return it
    header_dict = None
    if 'header' in folder_config:
        header_dict = copy.deepcopy(folder_config['header'])
        if weight is not None:
            header_dict['weight'] = weight
    return target_filename, target_folder, header_dict


def docs_from_tree(tree, include=['*'], exclude=[]):
    """ Get matching docs (git blobs) from a git tree

    Filter all blobs directly under a tree based on include and exclude lists.
    Filters are specified as lists of unix style filename patterns:
    (https://docs.python.org/3/library/fnmatch.html)
    """
    return filter(lambda b:
                  any(fnmatch.fnmatch(b.name, i) for i in include) and
                  not any(fnmatch.fnmatch(b.name, e) for e in exclude),
                  tree.blobs)


def transform_docs(git_repo, tag, folders, site_folder, base_path, base_url):
    """ Transform all folders configured for a tag

    :param git_repo: a gitpython Repo object, that points to the source git repo
    :param tag: a string that represents the git tag to be used
    :param folders: a list of folder names with a dict config each, loaded from
        the sync config file
    :param site_folder: the root folder on disk where files shall be written to
    :param base_path: used to rewrite relative links to sync'ed files
    :param base_url: used to rewrite relative links to unknown files
    """
    # Get the root tree for the requested version from the repo
    try:
        tag = next(x for x in git_repo.tags if x.name == tag)
    except StopIteration:
        # When no tag is found try to match a branch (remote heads)
        try:
            tag = next(x for x in git_repo.remote().refs if x.remote_head == tag)
        except StopIteration:
            logging.error(f'No tag or branch {tag} found in {git_repo}')
            sys.exit(1)

    # List all relevant blobs based on the folder config
    files = []
    for folder, folder_config in folders.items():
        root = tag.commit.tree.join(folder)
        docs = docs_from_tree(
            tree=root, include=folder_config.get('include', ['*']),
            exclude=folder_config.get('exclude', []))
        # zip doc, folder, target and header so we can process them in parallel later
        files.extend([(doc, folder, *doc_config(doc, folder_config, idx))
                      for idx, doc in enumerate(docs)])

    # Build a dict of all valid local links
    # This is used by `transform_link` to identify local links
    local_files = {doc.path: (target, target_folder)
                   for doc, _, target, target_folder, _ in files}

    # Build a list of tuples of `transform_doc` parameters
    transform_args = [
        (*f, local_files, base_path, base_url, site_folder) for f in files]

    with Pool() as pool:
        results = pool.starmap(transform_doc, transform_args)

    # Return the list of files transformed
    return results


def safe_makedirs(path):
    try:
        os.makedirs(path, exist_ok=True)
    except FileExistsError:
        pass


def transform_doc(doc, source_folder, target, target_folder, header,
                  local_files, base_path, base_url, site_folder):
    """ Transform a single doc to the target file

    Read a doc (git blob), transform links in it and write the result
    into a target file

    :param doc: the source doc as gitpython Blob
    :param source_folder: the name of the folder in the source repo where
        the file comes from
    :param target: the name of the file the transformed doc shall be written to
    :param target_folder: the folder within `site_folder` where the
        transformed doc shall be written to
    :param header: a dict with the content of a header (if any) to be
        prepended in the transformed doc
    :param local_files: a dict source file -> target used to rewrite
        relative links to sync'ed files
    :param base_path: used to rewrite relative links to sync'ed files
    :param base_url: used to rewrite relative links to unknown files
    :param site_folder: the root folder on disk where files shall be written to
    """
    site_target_folder = os.path.normpath(os.path.join(site_folder, target_folder))
    safe_makedirs(site_target_folder)
    target = os.path.join(site_target_folder, target)
    # Look for markdown files.
    # Some machines seem to use text/plain (e.g. running on a mac) and some use
    # text/markdown (e.g. running in a fresh ubuntu container)
    if doc.mime_type == 'text/plain' or doc.mime_type == 'text/markdown':
        with open(target, 'w+') as target_doc:
            # If there is a header configured, write it (in YAML)
            doc_all = decode(doc.data_stream.read())
            doc_markdown, fm = read_front_matter(doc_all)
            # Update the doc front matter with the configured one and write it
            write_front_matter(target_doc, fm, header)
            doc_markdown = transform_links_doc(
                doc_markdown, source_folder, local_files, base_path, base_url)
            target_doc.write(doc_markdown)
        return target

    # Pass-through for other mime types
    with open(target, 'bw+') as target_doc:
        logging.info(f'Pass-through {doc.mime_type} file {doc.path}')
        target_doc.write(doc.data_stream.read())
    return target


def decode(s, encodings=('utf8', 'latin1', 'ascii')):
    for encoding in encodings:
        try:
            return s.decode(encoding)
        except UnicodeDecodeError:
            pass
    return s.decode('ascii', 'ignore')


def read_front_matter(text):
    """ returns a tuple text, frontmatter (as dict) """
    if FM_BOUNDARY.match(text):
        try:
            _, fm, content = FM_BOUNDARY.split(text, 2)
        except ValueError:
            # Not enough values to unpack, boundary was matched once
            return text, None
        if content.startswith('\n'):
            content = content[1:]
        return content, YAML().load(fm)
    else:
        return text, None


def write_front_matter(target_doc, fm_doc, fm_config):
    fm_doc = fm_doc or {}
    fm_config = fm_config or {}
    fm_doc.update(fm_config)
    if fm_doc:
        target_doc.write(YAML_SEPARATOR)
        YAML().dump(fm_doc, target_doc)
        target_doc.write(YAML_SEPARATOR)


def transform_links_doc(text, base_path, local_files, rewrite_path, rewrite_url):
    """ transform all the links in the text """
    links = get_links(text)
    # Rewrite map, only use links with an href
    rewrite_map = {x.get("href"): transform_link(
        x.get("href"), base_path, local_files, rewrite_path, rewrite_url)
        for x in links if x.get("href")}
    for source, target in rewrite_map.items():
        text = text.replace(f'({source})', f'({target})')
    return text


def get_links(md):
    """ return a list of all the links in a string formatted in markdown """
    md = markdown.markdown(md)
    soup = BeautifulSoup(md, 'html.parser')
    return soup.find_all("a")


def transform_link(link, base_path, local_files, rewrite_path, rewrite_url):
    """ Transform hrefs to be valid URLs on the web-site

    Relative URLs are rewritten to `rewrite_path` when `link` points to a
    sync'ed file. Else they're rewritten to `rewrite_url`.
    Absolute URLs are not changed (they may be external).
    Fragments are relative to the page and do not need changes, except for
    lower() on local files because hugo generated anchors are always lower case.
    :param link: the link to be re-written
    :param base_path: the folder where the source document that contains
        the link lives
    :param local_files: a dict source file -> (target file, folder) that
        maps sync'ed files from their fully qualified source name into
        their filename in the site folder
    :param rewrite_path: the path local (sync'ed) files are rewritten to
    :param rewrite_url: the URL remote files are rewritten to
    :note: urlparse treats URLs without scheme like path only URLs,
        so 'github.com' will be rewritten to 'rewrite_url/github.com'
    """
    # ignore empty links
    if not link:
        return link
    # urlparse returns a named tuple
    parsed = urlparse(link)
    if is_absolute_url(parsed):
        return link
    if is_fragment(parsed):
        # A fragment only link points to an .md file
        return urlunparse(parsed._replace(fragment=parsed.fragment.lower()))
    path = os.path.normpath(parsed.path)
    # The list of local_files includes paths based on the root of the git
    # repo, so we need to join base_path and normalize to fq_path to find the
    # link in the list of local files
    fq_path = os.path.normpath(os.path.join(base_path, parsed.path))
    if fq_path in local_files:
        target_file = local_files[fq_path][0]
        target_folder = local_files[fq_path][1]
        is_index = (target_file == FOLDER_INDEX)
        filename, ext = os.path.splitext(target_file)
        # Special handling for md files
        if ext == '.md':
            # Links to the index file are rendered as base_path/
            if is_index:
                target_file = ''
            # links to other md files are rendered as .../[md filename]/
            else:
                target_file = filename + '/'
            # for .md files, lower the case of fragments to match hugo's behaviour
            parsed = parsed._replace(fragment=parsed.fragment.lower())
        if target_folder:
            new_path = [rewrite_path, target_folder, target_file]
        else:
            new_path = [rewrite_path, target_file]
        return parsed._replace(path="/".join(new_path)).geturl()
    # when not found on disk, append to the base_url
    return urljoin(rewrite_url, parsed._replace(path=fq_path).geturl())


def is_absolute_url(parsed_url):
    """ check if it is an absolute url """
    return all([parsed_url.scheme, parsed_url.netloc])


def is_fragment(parsed_url):
    """ determine if the url is an anchor-only link """
    return len(parsed_url.fragment) > 0 and not any(parsed_url[:-1])


def download_resources_to_project(yaml_list, clones):
    """ download the files from local clones based on a spec.
    The YAML sync spec can be found in sync/config/README.md
    """
    for entry in yaml_list:
        component = entry['component']
        repository = entry['repository']
        local_clone = clones.get(repository)
        if not local_clone:
            logging.error(f'No git clone found for {repository} in {clones}')
            sys.exit(1)

        for index, tag in enumerate(entry['tags']):
            logging.info(f'Syncing {component}@{tag["name"]}')
            link_base_url = f'{repository}/tree/{tag["name"]}/'
            if index == 0:
                # links for the first tag belong on the home page
                base_path = f'/docs/{component}'.lower()
                site_dir = f'{CONTENT_DIR}/{component}'
                os.makedirs(site_dir, exist_ok=True)
            else:
                # links for the other tags belong in the other versions, a.k.a. the vault
                base_path = f'/vault/{component}-{tag["displayName"]}'
                site_dir = f'{VAULT_DIR}/{component}-{tag["displayName"]}'
                os.makedirs(site_dir, exist_ok=True)

            results = transform_docs(
                git_repo=local_clone,
                tag=tag['name'],
                folders=tag['folders'],
                site_folder=site_dir,
                base_path=base_path,
                base_url=link_base_url)
            logging.debug(f'Finished syncing {component}@{tag["name"]}: ')
            logging.debug(f'{results}')


def get_files_in_path(path, file_type):
    """ return a list of all the files in path that match the file_type """
    file_list = []

    # walk through every file in directory and its sub directories
    for root, dirs, files in os.walk(path):
        for file in files:
            # append the file name to the list if it is the correct type
            if file.endswith(file_type):
                file_list.append(os.path.join(root, file))

    return file_list


def load_config(files):
    """ return a list of dicts with the filename and parsed content of each yaml file """
    yaml = YAML()
    dic_list = []

    for file in files:
        with open(file, 'r') as text:
            # get the paths from the config file
            dic_list.append({
                "filename": file,
                "content": yaml.load(text)
            })

    return dic_list


def save_config(config):
    """ save config files back to yaml """
    yaml = YAML()
    for c in config:
        with open(c['filename'], 'w') as out:
            yaml.dump(c['content'], out)


def get_tags(sync_config):
    """ return a list of tags, with their name and displayName """
    tags = []
    for tag in sync_config['tags']:
        tags.append({'name': tag['name'], 'displayName': tag['displayName']})
    return tags


def get_versions(sync_configs):
    """ return the list of all the versions, with their tags, name and archive """
    component_versions = []
    for sync_config in sync_configs:
        component_versions.append({
            'name': sync_config['component'],
            'tags': get_tags(sync_config),
            'archive': sync_config['archive']
        })
    return component_versions


def create_resource(dest_prefix, file, versions):
    """ create site resource based on the version and file """
    resource_template = jinja_env.get_template(f'{file}.template')

    if file.endswith(".js"):
        serialize = json.dumps(versions)
        resource = resource_template.render(component_versions_json=serialize)
    elif file.endswith(".md"):
        resource = resource_template.render(component_versions=versions)
    else:
        logging.warning(f'Cannot create resource for {file}. Only .js and .md supported')
        return

    with open(f'{dest_prefix}/{file}', 'w') as f:
        f.write(resource)


def clone_repo(repo, update):
    project = repo.split('/')[-1]
    clone_dir = os.path.join(DEFAULT_CACHE_FOLDER, project)
    if os.path.isdir(clone_dir):
        if not update:
            print(f'{project}: Cache folder {clone_dir} found, skipping clone.')
            return repo, git.Repo(clone_dir)
        # Cleanup and update via fetch --all
        print(f'{project}: updating started')
        cloned_repo = git.Repo(clone_dir)
        cloned_repo.git.reset('--hard')
        cloned_repo.git.clean('-xdf')
        cloned_repo.git.fetch('--all')
        print(f'{project}: updating completed')
        return repo, cloned_repo

    # Clone the repo
    print(f'{project}: cloning started')
    cloned_repo = git.Repo.clone_from(repo, clone_dir)
    print(f'{project}: cloning completed')
    return repo, cloned_repo


def clone_repos(sync_configs, update):
    # Make sure the cache folder exists
    safe_makedirs(DEFAULT_CACHE_FOLDER)

    with Pool() as pool:
        results = pool.starmap(clone_repo, [(x['repository'], update) for x in sync_configs])
    return {x: y for x, y in results}


@click.command()
@click.option('--config-folder', default=DEFAULT_CONFIG_FOLDER,
              help='the folder that contains the config files')
@click.option('--update-cache/--no-update-cache', default=False,
              help='update clone caches. !! This will force cleanup caches !!')
def sync(config_folder, update_cache):
    """ fetch all the docs and sync them to the website """
    # get the paths of the config files needed
    config_files = get_files_in_path(config_folder, ".yaml")
    config = [x["content"] for x in load_config(config_files)]

    # clone all relevant repos
    clones = clone_repos(config, update_cache)

    # download resources from the clone cache
    download_resources_to_project(config, clones)
    versions = get_versions(config)

    # create version switcher script
    create_resource(JS_ASSET_DIR, "version-switcher.js", versions)

    # create index for vault
    create_resource(VAULT_DIR, FOLDER_INDEX, versions)


if __name__ == '__main__':
    sync()
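# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the sync script above): it
# shows how the FM_BOUNDARY regex splits a doc into front matter and body,
# which is the behaviour read_front_matter() relies on. The sample markdown
# string below is made up for illustration; only stdlib `re` is assumed.
import re

_FM_BOUNDARY_DEMO = re.compile(r"^(?:<!--\n)?-{3,}\s*$(?:\n-->)?", re.MULTILINE)

_sample = """---
title: Tasks
weight: 2
---
# Tasks

Some body text.
"""

# Splitting twice yields: the (empty) prefix, the raw YAML block, and the body.
_, _fm, _body = _FM_BOUNDARY_DEMO.split(_sample, 2)
assert _fm.strip() == "title: Tasks\nweight: 2"
assert _body.lstrip().startswith("# Tasks")
# ---------------------------------------------------------------------------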
"""Nose Plugin that supports IPython doctests. Limitations: - When generating examples for use as doctests, make sure that you have pretty-printing OFF. This can be done either by setting the ``PlainTextFormatter.pprint`` option in your configuration file to False, or by interactively disabling it with %Pprint. This is required so that IPython output matches that of normal Python, which is used by doctest for internal execution. - Do not rely on specific prompt numbers for results (such as using '_34==True', for example). For IPython tests run via an external process the prompt numbers may be different, and IPython tests run as normal python code won't even have these special _NN variables set at all. """ #----------------------------------------------------------------------------- # Module imports # From the standard library import doctest import inspect import logging import os import re import sys import traceback import unittest from inspect import getmodule # We are overriding the default doctest runner, so we need to import a few # things from doctest directly from doctest import (REPORTING_FLAGS, REPORT_ONLY_FIRST_FAILURE, _unittest_reportflags, DocTestRunner, _extract_future_flags, pdb, _OutputRedirectingPdb, _exception_traceback, linecache) # Third-party modules import nose.core from nose.plugins import doctests, Plugin from nose.util import anyp, getpackage, test_address, resolve_name, tolist # Our own imports from IPython.utils.py3compat import builtin_mod, PY3, getcwd if PY3: from io import StringIO else: from StringIO import StringIO #----------------------------------------------------------------------------- # Module globals and other constants #----------------------------------------------------------------------------- log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- def is_extension_module(filename): """Return whether the given filename is an extension module. This simply checks that the extension is either .so or .pyd. """ return os.path.splitext(filename)[1].lower() in ('.so', '.pyd') class DocTestSkip(object): """Object wrapper for doctests to be skipped.""" ds_skip = """Doctest to skip. >>> 1 #doctest: +SKIP """ def __init__(self, obj): self.obj = obj def __getattribute__(self, key): if key == '__doc__': return DocTestSkip.ds_skip else: return getattr(object.__getattribute__(self, 'obj'), key) # Modified version of the one in the stdlib, that fixes a python bug (doctests # not found in extension modules, http://bugs.python.org/issue3158) class DocTestFinder(doctest.DocTestFinder): def _from_module(self, module, object): """ Return true if the given object is defined in the given module. 
""" if module is None: return True elif inspect.isfunction(object): return module.__dict__ is object.__globals__ elif inspect.isbuiltin(object): return module.__name__ == object.__module__ elif inspect.isclass(object): return module.__name__ == object.__module__ elif inspect.ismethod(object): # This one may be a bug in cython that fails to correctly set the # __module__ attribute of methods, but since the same error is easy # to make by extension code writers, having this safety in place # isn't such a bad idea return module.__name__ == object.__self__.__class__.__module__ elif inspect.getmodule(object) is not None: return module is inspect.getmodule(object) elif hasattr(object, '__module__'): return module.__name__ == object.__module__ elif isinstance(object, property): return True # [XX] no way not be sure. elif inspect.ismethoddescriptor(object): # Unbound PyQt signals reach this point in Python 3.4b3, and we want # to avoid throwing an error. See also # http://bugs.python.org/issue3158 return False else: raise ValueError( "object must be a class or function, got %r" % object) def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to `tests`. """ # print '_find for:', obj, name, module # dbg if hasattr(obj, "skip_doctest"): # print 'SKIPPING DOCTEST FOR:',obj # dbg obj = DocTestSkip(obj) doctest.DocTestFinder._find(self, tests, obj, name, module, source_lines, globs, seen) # Below we re-run pieces of the above method with manual modifications, # because the original code is buggy and fails to correctly identify # doctests in extension modules. # Local shorthands from inspect import isroutine, isclass, ismodule # Look for tests in a module's contained objects. if inspect.ismodule(obj) and self._recurse: for valname, val in obj.__dict__.items(): valname1 = '%s.%s' % (name, valname) if ((isroutine(val) or isclass(val)) and self._from_module(module, val)): self._find(tests, val, valname1, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj) and self._recurse: # print 'RECURSE into class:',obj # dbg for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. if ((inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or isinstance(val, property)) and self._from_module(module, val)): valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) class IPDoctestOutputChecker(doctest.OutputChecker): """Second-chance checker with support for random tests. If the default comparison doesn't pass, this checker looks in the expected output string for flags that tell us to ignore the output. """ random_re = re.compile(r'#\s*random\s+') def check_output(self, want, got, optionflags): """Check output, accepting special markers embedded in the output. If the output didn't pass the default validation but the special string '#random' is included, we accept it.""" # Let the original tester verify first, in case people have valid tests # that happen to have a comment saying '#random' embedded in. 
ret = doctest.OutputChecker.check_output(self, want, got, optionflags) if not ret and self.random_re.search(want): # print >> sys.stderr, 'RANDOM OK:',want # dbg return True return ret class DocTestCase(doctests.DocTestCase): """Proxy for DocTestCase: provides an address() method that returns the correct address for the doctest case. Otherwise acts as a proxy to the test case. To provide hints for address(), an obj may also be passed -- this will be used as the test object for purposes of determining the test address, if it is provided. """ # Note: this method was taken from numpy's nosetester module. # Subclass nose.plugins.doctests.DocTestCase to work around a bug in # its constructor that blocks non-default arguments from being passed # down into doctest.DocTestCase def __init__(self, test, optionflags=0, setUp=None, tearDown=None, checker=None, obj=None, result_var='_'): self._result_var = result_var doctests.DocTestCase.__init__(self, test, optionflags=optionflags, setUp=setUp, tearDown=tearDown, checker=checker) # Now we must actually copy the original constructor from the stdlib # doctest class, because we can't call it directly and a bug in nose # means it never gets passed the right arguments. self._dt_optionflags = optionflags self._dt_checker = checker self._dt_test = test self._dt_test_globs_ori = test.globs self._dt_setUp = setUp self._dt_tearDown = tearDown # XXX - store this runner once in the object! runner = IPDocTestRunner(optionflags=optionflags, checker=checker, verbose=False) self._dt_runner = runner # Each doctest should remember the directory it was loaded from, so # things like %run work without too many contortions self._ori_dir = os.path.dirname(test.filename) # Modified runTest from the default stdlib def runTest(self): test = self._dt_test runner = self._dt_runner old = sys.stdout new = StringIO() optionflags = self._dt_optionflags if not (optionflags & REPORTING_FLAGS): # The option flags don't include any reporting flags, # so add the default reporting flags optionflags |= _unittest_reportflags try: # Save our current directory and switch out to the one where the # test was originally created, in case another doctest did a # directory change. We'll restore this in the finally clause. curdir = getcwd() # print 'runTest in dir:', self._ori_dir # dbg os.chdir(self._ori_dir) runner.DIVIDER = "-" * 70 failures, tries = runner.run(test, out=new.write, clear_globs=False) finally: sys.stdout = old os.chdir(curdir) if failures: raise self.failureException(self.format_failure(new.getvalue())) def setUp(self): """Modified test setup that syncs with ipython namespace""" # print "setUp test", self._dt_test.examples # dbg if isinstance(self._dt_test.examples[0], IPExample): # for IPython examples *only*, we swap the globals with the ipython # namespace, after updating it with the globals (which doctest # fills with the necessary info from the module being tested). 
self.user_ns_orig = {} self.user_ns_orig.update(_ip.user_ns) _ip.user_ns.update(self._dt_test.globs) # We must remove the _ key in the namespace, so that Python's # doctest code sets it naturally _ip.user_ns.pop('_', None) _ip.user_ns['__builtins__'] = builtin_mod self._dt_test.globs = _ip.user_ns super(DocTestCase, self).setUp() def tearDown(self): # Undo the test.globs reassignment we made, so that the parent class # teardown doesn't destroy the ipython namespace if isinstance(self._dt_test.examples[0], IPExample): self._dt_test.globs = self._dt_test_globs_ori _ip.user_ns.clear() _ip.user_ns.update(self.user_ns_orig) # XXX - fperez: I am not sure if this is truly a bug in nose 0.11, but # it does look like one to me: its tearDown method tries to run # # delattr(builtin_mod, self._result_var) # # without checking that the attribute really is there; it implicitly # assumes it should have been set via displayhook. But if the # displayhook was never called, this doesn't necessarily happen. I # haven't been able to find a little self-contained example outside of # ipython that would show the problem so I can report it to the nose # team, but it does happen a lot in our code. # # So here, we just protect as narrowly as possible by trapping an # attribute error whose message would be the name of self._result_var, # and letting any other error propagate. try: super(DocTestCase, self).tearDown() except AttributeError as exc: if exc.args[0] != self._result_var: raise # A simple subclassing of the original with a different class name, so we can # distinguish and treat differently IPython examples from pure python ones. class IPExample(doctest.Example): pass class IPExternalExample(doctest.Example): """Doctest examples to be run in an external process.""" def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, options=None): # Parent constructor doctest.Example.__init__( self, source, want, exc_msg, lineno, indent, options) # An EXTRA newline is needed to prevent pexpect hangs self.source += '\n' class IPDocTestParser(doctest.DocTestParser): """ A class used to parse strings containing doctest examples. Note: This is a version modified to properly recognize IPython input and convert any IPython examples into valid Python ones. """ # This regular expression is used to find doctest examples in a # string. It defines three groups: `source` is the source code # (including leading indentation and prompts); `indent` is the # indentation of the first (PS1) line of the source code; and # `want` is the expected output (including leading indentation). # Classic Python prompts or default IPython ones _PS1_PY = r'>>>' _PS2_PY = r'\.\.\.' _PS1_IP = r'In\ \[\d+\]:' _PS2_IP = r'\ \ \ \.\.\.+:' _RE_TPL = r''' # Source consists of a PS1 line followed by zero or more PS2 lines. (?P<source> (?:^(?P<indent> [ ]*) (?P<ps1> %s) .*) # PS1 line (?:\n [ ]* (?P<ps2> %s) .*)*) # PS2 lines \n? # a newline # Want consists of any non-blank lines that do not start with PS1. (?P<want> (?:(?![ ]*$) # Not a blank line (?![ ]*%s) # Not a line starting with PS1 (?![ ]*%s) # Not a line starting with PS2 .*$\n? # But any other line )*) ''' _EXAMPLE_RE_PY = re.compile(_RE_TPL % (_PS1_PY, _PS2_PY, _PS1_PY, _PS2_PY), re.MULTILINE | re.VERBOSE) _EXAMPLE_RE_IP = re.compile(_RE_TPL % (_PS1_IP, _PS2_IP, _PS1_IP, _PS2_IP), re.MULTILINE | re.VERBOSE) # Mark a test as being fully random. In this case, we simply append the # random marker ('#random') to each individual example's output. This way # we don't need to modify any other code. 
_RANDOM_TEST = re.compile(r'#\s*all-random\s+') # Mark tests to be executed in an external process - currently unsupported. _EXTERNAL_IP = re.compile(r'#\s*ipdoctest:\s*EXTERNAL') def ip2py(self, source): """Convert input IPython source into valid Python.""" block = _ip.input_transformer_manager.transform_cell(source) if len(block.splitlines()) == 1: return _ip.prefilter(block) else: return block def parse(self, string, name='<string>'): """ Divide the given string into examples and intervening text, and return them as a list of alternating Examples and strings. Line numbers for the Examples are 0-based. The optional argument `name` is a name identifying this string, and is only used for error messages. """ # print 'Parse string:\n',string # dbg string = string.expandtabs() # If all lines begin with the same indentation, then strip it. min_indent = self._min_indent(string) if min_indent > 0: string = '\n'.join([l[min_indent:] for l in string.split('\n')]) output = [] charno, lineno = 0, 0 # We make 'all random' tests by adding the '# random' mark to every # block of output in the test. if self._RANDOM_TEST.search(string): random_marker = '\n# random' else: random_marker = '' # Whether to convert the input from ipython to python syntax ip2py = False # Find all doctest examples in the string. First, try them as Python # examples, then as IPython ones terms = list(self._EXAMPLE_RE_PY.finditer(string)) if terms: # Normal Python example # print '-'*70 # dbg # print 'PyExample, Source:\n',string # dbg # print '-'*70 # dbg Example = doctest.Example else: # It's an ipython example. Note that IPExamples are run # in-process, so their syntax must be turned into valid python. # IPExternalExamples are run out-of-process (via pexpect) so they # don't need any filtering (a real ipython will be executing them). terms = list(self._EXAMPLE_RE_IP.finditer(string)) if self._EXTERNAL_IP.search(string): # print '-'*70 # dbg # print 'IPExternalExample, Source:\n',string # dbg # print '-'*70 # dbg Example = IPExternalExample else: # print '-'*70 # dbg # print 'IPExample, Source:\n',string # dbg # print '-'*70 # dbg Example = IPExample ip2py = True for m in terms: # Add the pre-example text to `output`. output.append(string[charno:m.start()]) # Update lineno (lines before this example) lineno += string.count('\n', charno, m.start()) # Extract info from the regexp match. (source, options, want, exc_msg) = \ self._parse_example(m, name, lineno, ip2py) # Append the random-output marker (it defaults to empty in most # cases, it's only non-empty for 'all-random' tests): want += random_marker if Example is IPExternalExample: options[doctest.NORMALIZE_WHITESPACE] = True want += '\n' # Create an Example, and add it to the list. if not self._IS_BLANK_OR_COMMENT(source): output.append(Example(source, want, exc_msg, lineno=lineno, indent=min_indent + len(m.group('indent')), options=options)) # Update lineno (lines inside this example) lineno += string.count('\n', m.start(), m.end()) # Update charno. charno = m.end() # Add any remaining post-example text to `output`. output.append(string[charno:]) return output def _parse_example(self, m, name, lineno, ip2py=False): """ Given a regular expression match from `_EXAMPLE_RE` (`m`), return a pair `(source, want)`, where `source` is the matched example's source code (with prompts and indentation stripped); and `want` is the example's expected output (with indentation stripped). 
`name` is the string's name, and `lineno` is the line number where the example starts; both are used for error messages. Optional: `ip2py`: if true, filter the input via IPython to convert the syntax into valid python. """ # Get the example's indentation level. indent = len(m.group('indent')) # Divide source into lines; check that they're properly # indented; and then strip their indentation & prompts. source_lines = m.group('source').split('\n') # We're using variable-length input prompts ps1 = m.group('ps1') ps2 = m.group('ps2') ps1_len = len(ps1) self._check_prompt_blank(source_lines, indent, name, lineno, ps1_len) if ps2: self._check_prefix( source_lines[1:], ' ' * indent + ps2, name, lineno) source = '\n'.join([sl[indent + ps1_len + 1:] for sl in source_lines]) if ip2py: # Convert source input from IPython into valid Python syntax source = self.ip2py(source) # Divide want into lines; check that it's properly indented; and # then strip the indentation. Spaces before the last newline should # be preserved, so plain rstrip() isn't good enough. want = m.group('want') want_lines = want.split('\n') if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): del want_lines[-1] # forget final newline & spaces after it self._check_prefix(want_lines, ' ' * indent, name, lineno + len(source_lines)) # Remove ipython output prompt that might be present in the first line want_lines[0] = re.sub(r'Out\[\d+\]: \s*?\n?', '', want_lines[0]) want = '\n'.join([wl[indent:] for wl in want_lines]) # If `want` contains a traceback message, then extract it. m = self._EXCEPTION_RE.match(want) if m: exc_msg = m.group('msg') else: exc_msg = None # Extract options from the source. options = self._find_options(source, name, lineno) return source, options, want, exc_msg def _check_prompt_blank(self, lines, indent, name, lineno, ps1_len): """ Given the lines of a source string (including prompts and leading indentation), check to make sure that every prompt is followed by a space character. If any line is not followed by a space character, then raise ValueError. Note: IPython-modified version which takes the input prompt length as a parameter, so that prompts of variable length can be dealt with. """ space_idx = indent + ps1_len min_len = space_idx + 1 for i, line in enumerate(lines): if len(line) >= min_len and line[space_idx] != ' ': raise ValueError('line %r of the docstring for %s ' 'lacks blank after %s: %r' % (lineno + i + 1, name, line[indent:space_idx], line)) SKIP = doctest.register_optionflag('SKIP') class IPDocTestRunner(doctest.DocTestRunner, object): """Test runner that synchronizes the IPython namespace with test globals. """ def run(self, test, compileflags=None, out=None, clear_globs=True): # Hack: ipython needs access to the execution context of the example, # so that it can propagate user variables loaded by %run into # test.globs. We put them here into our modified %run as a function # attribute. Our new %run will then only make the namespace update # when called (rather than unconconditionally updating test.globs here # for all examples, most of which won't be calling %run anyway). 
#_ip._ipdoctest_test_globs = test.globs #_ip._ipdoctest_test_filename = test.filename test.globs.update(_ip.user_ns) return super(IPDocTestRunner, self).run(test, compileflags, out, clear_globs) class DocFileCase(doctest.DocFileCase): """Overrides to provide filename """ def address(self): return (self._dt_test.filename, None, None) class ExtensionDoctest(doctests.Doctest): """Nose Plugin that supports doctests in extension modules. """ name = 'extdoctest' # call nosetests with --with-extdoctest enabled = True def options(self, parser, env=os.environ): Plugin.options(self, parser, env) parser.add_option('--doctest-tests', action='store_true', dest='doctest_tests', default=env.get('NOSE_DOCTEST_TESTS', True), help="Also look for doctests in test modules. " "Note that classes, methods and functions should " "have either doctests or non-doctest tests, " "not both. [NOSE_DOCTEST_TESTS]") parser.add_option('--doctest-extension', action="append", dest="doctestExtension", help="Also look for doctests in files with " "this extension [NOSE_DOCTEST_EXTENSION]") # Set the default as a list, if given in env; otherwise # an additional value set on the command line will cause # an error. env_setting = env.get('NOSE_DOCTEST_EXTENSION') if env_setting is not None: parser.set_defaults(doctestExtension=tolist(env_setting)) def configure(self, options, config): Plugin.configure(self, options, config) # Pull standard doctest plugin out of config; we will do doctesting config.plugins.plugins = [p for p in config.plugins.plugins if p.name != 'doctest'] self.doctest_tests = options.doctest_tests self.extension = tolist(options.doctestExtension) self.parser = doctest.DocTestParser() self.finder = DocTestFinder() self.checker = IPDoctestOutputChecker() self.globs = None self.extraglobs = None def loadTestsFromExtensionModule(self, filename): bpath, mod = os.path.split(filename) modname = os.path.splitext(mod)[0] try: sys.path.append(bpath) module = __import__(modname) tests = list(self.loadTestsFromModule(module)) finally: sys.path.pop() return tests # NOTE: the method below is almost a copy of the original one in nose, with # a few modifications to control output checking. def loadTestsFromModule(self, module): # print '*** ipdoctest - lTM',module # dbg if not self.matches(module.__name__): log.debug("Doctest doesn't want module %s", module) return tests = self.finder.find(module, globs=self.globs, extraglobs=self.extraglobs) if not tests: return # always use whitespace and ellipsis options optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS tests.sort() module_file = module.__file__ if module_file[-4:] in ('.pyc', '.pyo'): module_file = module_file[:-1] for test in tests: if not test.examples: continue if not test.filename: test.filename = module_file yield DocTestCase(test, optionflags=optionflags, checker=self.checker) def loadTestsFromFile(self, filename): # print "ipdoctest - from file", filename # dbg if is_extension_module(filename): for t in self.loadTestsFromExtensionModule(filename): yield t else: if self.extension and anyp(filename.endswith, self.extension): name = os.path.basename(filename) dh = open(filename) try: doc = dh.read() finally: dh.close() test = self.parser.get_doctest( doc, globs={'__file__': filename}, name=name, filename=filename, lineno=0) if test.examples: # print 'FileCase:',test.examples # dbg yield DocFileCase(test) else: yield False # no tests to load class IPythonDoctest(ExtensionDoctest): """Nose Plugin that supports doctests in extension modules. 
""" name = 'ipdoctest' # call nosetests with --with-ipdoctest enabled = True def makeTest(self, obj, parent): """Look for doctests in the given object, which will be a function, method or class. """ # print 'Plugin analyzing:', obj, parent # dbg # always use whitespace and ellipsis options optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS doctests = self.finder.find(obj, module=getmodule(parent)) if doctests: for test in doctests: if len(test.examples) == 0: continue yield DocTestCase(test, obj=obj, optionflags=optionflags, checker=self.checker) def options(self, parser, env=os.environ): # print "Options for nose plugin:", self.name # dbg Plugin.options(self, parser, env) parser.add_option('--ipdoctest-tests', action='store_true', dest='ipdoctest_tests', default=env.get('NOSE_IPDOCTEST_TESTS', True), help="Also look for doctests in test modules. " "Note that classes, methods and functions should " "have either doctests or non-doctest tests, " "not both. [NOSE_IPDOCTEST_TESTS]") parser.add_option('--ipdoctest-extension', action="append", dest="ipdoctest_extension", help="Also look for doctests in files with " "this extension [NOSE_IPDOCTEST_EXTENSION]") # Set the default as a list, if given in env; otherwise # an additional value set on the command line will cause # an error. env_setting = env.get('NOSE_IPDOCTEST_EXTENSION') if env_setting is not None: parser.set_defaults(ipdoctest_extension=tolist(env_setting)) def configure(self, options, config): # print "Configuring nose plugin:", self.name # dbg Plugin.configure(self, options, config) # Pull standard doctest plugin out of config; we will do doctesting config.plugins.plugins = [p for p in config.plugins.plugins if p.name != 'doctest'] self.doctest_tests = options.ipdoctest_tests self.extension = tolist(options.ipdoctest_extension) self.parser = IPDocTestParser() self.finder = DocTestFinder(parser=self.parser) self.checker = IPDoctestOutputChecker() self.globs = None self.extraglobs = None
define(["jquery", "Q", "modules"], function ($, Q, modules) { function makeRequest(url, type, data, content) { var deferred = Q.defer(); var requestOptions = { url: url, type: type, //dataType: "application/x-www-form-urlencoded", data: data, beforeSend: function (xhr) { var token = JSON.parse(localStorage.getItem("token")); if (token !== null) { //xhr.withCredentials = true; xhr.setRequestHeader("Authorization", "Bearer " + token); } }, success: function resolveDeferred(requestData) { deferred.resolve(requestData); }, error: function rejectDeferred(errorData) { deferred.reject(JSON.parse(errorData.responseText)); } } if (content == null) { requestOptions.contentType = "application/json; charset=utf-8"; } else { requestOptions.contentType = content; } $.ajax(requestOptions); return deferred.promise; } function makeGetRequest(url) { return makeRequest(url, "get"); } function makePostRequest(url, data, content) { return makeRequest(url, "POST", data, content); } return { get: makeGetRequest, post: makePostRequest } });
// THIS FILE IS AUTO GENERATED import { GenIcon } from '../lib'; export function GiWineBottle (props) { return GenIcon({"tag":"svg","attr":{"viewBox":"0 0 512 512"},"child":[{"tag":"path","attr":{"d":"M133.99 28v23.512h52.02V28h-52.02zm0 41.51v90.705c-26.01 17.34-43.347 39.014-43.347 56.353v260.735S90.64 494 107.98 494h103.967c17.411 0 17.41-17.34 17.41-17.34V216.568c0-17.34-17.338-39.014-43.347-56.353V69.51h-52.02zM107 252h106v162H107V252zm194.514 3l-2.051 6.154c-8.474 25.423-12.793 58.44-6.233 86.87 3.28 14.215 9.429 27.45 19.846 37.273 8.61 8.118 20.105 13.533 33.924 15.172v74.64C327.601 479.296 302 494 302 494h108s-25.601-14.705-45-18.89v-74.641c13.82-1.639 25.314-7.054 33.924-15.172 10.417-9.822 16.565-23.058 19.846-37.274 6.56-28.43 2.241-61.446-6.233-86.869l-2.05-6.154H301.513zM125 270v126h70V270h-70zm189.703 3h82.594c2.639 9.261 4.629 19.565 5.68 30h-93.954c1.051-10.435 3.041-20.739 5.68-30zm-6.486 48h95.566c-.116 8.04-.907 15.846-2.553 22.977-2.72 11.784-7.571 21.548-14.654 28.226C379.494 378.881 370.126 383 356 383c-14.125 0-23.494-4.12-30.576-10.797-7.083-6.678-11.935-16.442-14.654-28.226-1.646-7.131-2.437-14.938-2.553-22.977z"}}]})(props); };
/* Copyright 2020 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { escapePDFName, numberToString } from "./core_utils.js";
import { OPS, warn } from "../shared/util.js";
import { ColorSpace } from "./colorspace.js";
import { EvaluatorPreprocessor } from "./evaluator.js";
import { Name } from "./primitives.js";
import { StringStream } from "./stream.js";

class DefaultAppearanceEvaluator extends EvaluatorPreprocessor {
  constructor(str) {
    super(new StringStream(str));
  }

  parse() {
    const operation = {
      fn: 0,
      args: [],
    };
    const result = {
      fontSize: 0,
      fontName: "",
      fontColor: /* black = */ new Uint8ClampedArray(3),
    };

    try {
      while (true) {
        operation.args.length = 0; // Ensure that `args` is always reset.

        if (!this.read(operation)) {
          break;
        }
        if (this.savedStatesDepth !== 0) {
          continue; // Don't get info in save/restore sections.
        }
        const { fn, args } = operation;

        switch (fn | 0) {
          case OPS.setFont:
            const [fontName, fontSize] = args;
            if (fontName instanceof Name) {
              result.fontName = fontName.name;
            }
            if (typeof fontSize === "number" && fontSize > 0) {
              result.fontSize = fontSize;
            }
            break;
          case OPS.setFillRGBColor:
            ColorSpace.singletons.rgb.getRgbItem(args, 0, result.fontColor, 0);
            break;
          case OPS.setFillGray:
            ColorSpace.singletons.gray.getRgbItem(args, 0, result.fontColor, 0);
            break;
          case OPS.setFillCMYKColor:
            // The "k" operator carries the four CMYK components that the
            // cmyk color-space singleton expects.
            ColorSpace.singletons.cmyk.getRgbItem(args, 0, result.fontColor, 0);
            break;
        }
      }
    } catch (reason) {
      warn(`parseDefaultAppearance - ignoring errors: "${reason}".`);
    }

    return result;
  }
}

// Parse DA to extract font and color information.
function parseDefaultAppearance(str) {
  return new DefaultAppearanceEvaluator(str).parse();
}

function getPdfColor(color) {
  if (color[0] === color[1] && color[1] === color[2]) {
    const gray = color[0] / 255;
    return `${numberToString(gray)} g`;
  }
  return (
    Array.from(color)
      .map(c => numberToString(c / 255))
      .join(" ") + " rg"
  );
}

// Create default appearance string from some information.
function createDefaultAppearance({ fontSize, fontName, fontColor }) {
  return `/${escapePDFName(fontName)} ${fontSize} Tf ${getPdfColor(fontColor)}`;
}

export { createDefaultAppearance, getPdfColor, parseDefaultAppearance };
import datetime from dateutil.relativedelta import relativedelta import json from lxml import etree import os import random import urllib import uuid from nose.tools import ( eq_, assert_raises, assert_raises_regexp, set_trace, ) from StringIO import StringIO from api.authenticator import BasicAuthenticationProvider from api.circulation import ( LoanInfo, HoldInfo, FulfillmentInfo, ) from api.config import ( Configuration, temp_config, ) from api.circulation_exceptions import * from api.rbdigital import ( AudiobookManifest, RBDigitalAPI, RBDigitalBibliographicCoverageProvider, RBDigitalCirculationMonitor, RBDigitalDeltaMonitor, RBDigitalFulfillmentProxy, RBDigitalImportMonitor, RBDigitalRepresentationExtractor, RBDigitalSyncMonitor, MockRBDigitalAPI, RBFulfillmentInfo, ) from core.classifier import Classifier from core.coverage import CoverageFailure from core.metadata_layer import ( CirculationData, ContributorData, IdentifierData, Metadata, SubjectData, TimestampData, ) from core.model import ( get_one_or_create, Classification, ConfigurationSetting, Contributor, Credential, Collection, DataSource, DeliveryMechanism, Edition, ExternalIntegration, Hyperlink, Identifier, LicensePool, Patron, Representation, Subject, Work, ) from core.scripts import RunCollectionCoverageProviderScript from core.testing import MockRequestsResponse from core.util.http import ( BadResponseException, RemoteIntegrationException, HTTP, ) from . import ( DatabaseTest, ) from .test_routes import RouteTest from .test_controller import ControllerTest class RBDigitalAPITest(DatabaseTest): def setup(self): super(RBDigitalAPITest, self).setup() self.base_path = os.path.split(__file__)[0] self.resource_path = os.path.join(self.base_path, "files", "rbdigital") # Make sure the default library is created so that it will # be configured properly with the mock collection. self._default_library self.collection = MockRBDigitalAPI.mock_collection(self._db) self.api = MockRBDigitalAPI( self._db, self.collection, base_path=self.base_path ) def get_data(self, filename): # returns contents of sample file as string and as dict path = os.path.join(self.resource_path, filename) data = open(path).read() return data, json.loads(data) @property def default_patron(self): """Create a default patron on demand.""" if not hasattr(self, '_default_patron'): self._default_patron = self._patron( external_identifier="rbdigital_testuser" ) self._default_patron.authorization_identifier="13057226" return self._default_patron class TestRBDigitalAPI(RBDigitalAPITest): def test__run_self_tests(self): class Mock(MockRBDigitalAPI): """Mock the methods invoked by the self-test.""" # We're going to count the number of items in the # eBook and eAudio collections. def get_ebook_availability_info(self, media_type): if media_type=='eBook': return [] elif media_type=='eAudio': # Three titles - one available, one unavailable, and # one with availability missing. return [ dict(availability=False), dict(availability=True), dict(), ] # Then for each collection with a default patron, we're # going to see how many loans and holds the default patron # has. patron_activity_called_with = [] def patron_activity(self, patron, pin): self.patron_activity_called_with.append( (patron.authorization_identifier, pin) ) return [1,2,3] # Now let's make sure two Libraries have access to this # Collection -- one library with a default patron and one # without. 
no_default_patron = self._library() self.collection.libraries.append(no_default_patron) with_default_patron = self._default_library integration = self._external_integration( "api.simple_authentication", ExternalIntegration.PATRON_AUTH_GOAL, libraries=[with_default_patron] ) p = BasicAuthenticationProvider integration.setting(p.TEST_IDENTIFIER).value = "username1" integration.setting(p.TEST_PASSWORD).value = "password1" # Now that everything is set up, run the self-test. api = Mock(self._db, self.collection) results = sorted( api._run_self_tests(self._db), key=lambda x: x.name ) [no_patron_credential, patron_activity, audio_count, ebook_count] = results # Verify that each test method was called and returned the # expected SelfTestResult object. eq_( "Acquiring test patron credentials for library %s" % no_default_patron.name, no_patron_credential.name ) eq_(False, no_patron_credential.success) eq_("Library has no test patron configured.", no_patron_credential.exception.message) eq_("Checking patron activity, using test patron for library %s" % with_default_patron.name, patron_activity.name) eq_(True, patron_activity.success) eq_("Total loans and holds: 3", patron_activity.result) eq_([("username1", "password1")], api.patron_activity_called_with) eq_("Counting audiobooks in collection", audio_count.name) eq_(True, audio_count.success) eq_("Total items: 3 (1 currently loanable, 2 currently not loanable)", audio_count.result) eq_("Counting ebooks in collection", ebook_count.name) eq_(True, ebook_count.success) eq_("Total items: 0 (0 currently loanable, 0 currently not loanable)", ebook_count.result) def test__run_self_tests_short_circuit(self): """Simulate a self-test run on an improperly configured site. """ error = dict(message='Invalid library id is provided or permission denied') class Mock(MockRBDigitalAPI): def get_ebook_availability_info(self, media_type): return error api = Mock(self._db, self.collection) [result] = api._run_self_tests(self._db) # We gave up after the first test failed. eq_("Counting ebooks in collection", result.name) eq_("Invalid library id is provided or permission denied", result.exception.message) eq_(repr(error), result.exception.debug_message) def test_external_integration(self): eq_(self.collection.external_integration, self.api.external_integration(self._db)) def queue_initial_patron_id_lookup(self, api=None): """All the RBDigitalAPI methods that take a Patron object call self.patron_remote_identifier() immediately, to find the patron's RBdigital ID. Since the default_patron starts out without a Credential containing that ID, this means making a request to the RBdigital API to look up an existing ID. If that lookup fails, it means a call to create_patron() and another API call. It's important to test that all these methods call patron_remote_identifier(), so this helper method queues up a response to the "lookup" request that makes it look like the Patron has an RBdigital ID but for whatever reason they are missing their Credential. """ api = api or self.api patron_datastr, datadict = api.get_data( "response_patron_internal_id_found.json" ) api.queue_response(status_code=200, content=patron_datastr) def queue_fetch_patron_bearer_token(self, api=None): """Queue responses for the API calls used to obtain a patron bearer token. RBDigitalAPI.fetch_patron_bearer_token requires three API calls. This method makes it easier and less error-prone to set up for that. 
""" api = api or self.api for filename in ( "response_patron_info_found.json", "response_patron_internal_id_found.json", "response_patron_bearer_token_success.json", ): datastr, datadict = api.get_data(filename) api.queue_response(status_code=200, content=datastr) def _assert_patron_has_remote_identifier_credential( self, patron, external_id ): """Assert that the given Patron has a permanent Credential storing their RBdigital ID. """ [credential] = patron.credentials eq_(DataSource.RB_DIGITAL, credential.data_source.name) eq_(Credential.IDENTIFIER_FROM_REMOTE_SERVICE, credential.type) eq_(external_id, credential.credential) eq_(None, credential.expires) def _set_notification_address(self, library): """Set the default notification address for the given library. This is necessary to create RBdigital user accounts for its patrons. """ ConfigurationSetting.for_library( Configuration.DEFAULT_NOTIFICATION_EMAIL_ADDRESS, library ).value = 'genericemail@library.org' def test_create_identifier_strings(self): identifier = self._identifier() values = RBDigitalAPI.create_identifier_strings(["foo", identifier]) eq_(["foo", identifier.identifier], values) def test_availability_exception(self): self.api.queue_response(500) assert_raises_regexp( BadResponseException, "Bad response from availability_search", self.api.get_all_available_through_search ) def test_search(self): datastr, datadict = self.api.get_data("response_search_one_item_1.json") self.api.queue_response(status_code=200, content=datastr) response = self.api.search(mediatype='ebook', author="Alexander Mccall Smith", title="Tea Time for the Traditionally Built") response_dictionary = response.json() eq_(1, response_dictionary['pageCount']) eq_(u'Tea Time for the Traditionally Built', response_dictionary['items'][0]['item']['title']) def test_get_all_available_through_search(self): datastr, datadict = self.api.get_data("response_search_five_items_1.json") self.api.queue_response(status_code=200, content=datastr) response_dictionary = self.api.get_all_available_through_search() eq_(1, response_dictionary['pageCount']) eq_(5, response_dictionary['resultSetCount']) eq_(5, len(response_dictionary['items'])) returned_titles = [iteminterest['item']['title'] for iteminterest in response_dictionary['items']] assert (u'Unusual Uses for Olive Oil' in returned_titles) def test_get_all_catalog(self): datastr, datadict = self.api.get_data("response_catalog_all_sample.json") self.api.queue_response(status_code=200, content=datastr) catalog = self.api.get_all_catalog() eq_( [u'Tricks', u'Emperor Mage: The Immortals', u'In-Flight Russian'], [x['title'] for x in catalog] ) def test_fuzzy_binary_searcher(self): # A fuzzy binary searcher sorts an array by its key, and then must either: # - find an exact match, if one exists; or # - return an "adjacent" index and the direction in which a match # would have been found, had one existed. 
        array = [5, 3, 10, 19, -1, 8, -7]  # => [-7, -1, 3, 5, 8, 10, 19]
        search = self.api._FuzzyBinarySearcher(array)

        nine_idx, nine_rel = search(9)
        eq_((nine_idx == 4 and nine_rel == search.INDEXED_LESS_THAN_MATCH) or
            (nine_idx == 5 and nine_rel == search.INDEXED_GREATER_THAN_MATCH), True)
        ten = search(10)
        eq_(True, ten == (5, search.INDEXED_EQUALS_MATCH))
        neg5 = search(-5)
        eq_(True, neg5 in ((0, search.INDEXED_LESS_THAN_MATCH),
                           (1, search.INDEXED_GREATER_THAN_MATCH)))

        # make sure we can hit the edges
        neg7 = search(-7)
        nineteen = search(19)
        eq_(True, neg7 == (0, search.INDEXED_EQUALS_MATCH))
        eq_(True, nineteen == (6, search.INDEXED_EQUALS_MATCH))

        # and beyond the edges
        neg100 = search(-100)
        pos100 = search(100)
        eq_(True, neg100 == (0, search.INDEXED_GREATER_THAN_MATCH))
        eq_(True, pos100 == (6, search.INDEXED_LESS_THAN_MATCH))

        # Lookups in more complicated objects
        _, snapshots = self.api.get_data("response_catalog_availability_dates_multi.json")
        snapshots_max_index = len(snapshots) - 1
        # The following are the earliest and latest dates in the snapshot test file.
        first_snapshot = "2016-04-01"
        last_snapshot = "2020-04-14"
        # dates that are well before and well after any available snapshot
        neg_infinity = "1960-01-01"
        pos_infinity = "2999-12-31"

        # create the searcher object
        snap_date_searcher = self.api._FuzzyBinarySearcher(snapshots, key=lambda s: s["asOf"])
        sorted_snapshots = snap_date_searcher.sorted_list
        eq_(first_snapshot, sorted_snapshots[0]["asOf"])
        eq_(last_snapshot, sorted_snapshots[snapshots_max_index]["asOf"])

        first = snap_date_searcher(first_snapshot)
        last = snap_date_searcher(last_snapshot)
        eq_(first, (0, snap_date_searcher.INDEXED_EQUALS_MATCH))
        eq_(last, (snapshots_max_index, snap_date_searcher.INDEXED_EQUALS_MATCH))

        very_neg = snap_date_searcher(neg_infinity)
        very_pos = snap_date_searcher(pos_infinity)
        eq_(very_neg, (0, snap_date_searcher.INDEXED_GREATER_THAN_MATCH))
        eq_(very_pos, (snapshots_max_index, snap_date_searcher.INDEXED_LESS_THAN_MATCH))

        assert_raises_regexp(
            TypeError, ".*'key' must be 'None' or a callable.",
            self.api._FuzzyBinarySearcher, snapshots, key="not a callable"
        )

    def test_align_delta_dates_to_available_snapshots(self):
        datastr, datadict = self.api.get_data("response_catalog_availability_dates_multi.json")
        # The following are the earliest and latest dates in the snapshot test file.
        first_snapshot = "2016-04-01"
        last_snapshot = "2020-04-14"

        # A missing begin date should be assigned the date of the earliest
        # snapshot; a missing end date should get the date of the latest.
        self.api.queue_response(status_code=200, content=datastr)
        from_date, to_date = self.api.align_dates_to_available_snapshots()
        eq_(first_snapshot, from_date)
        eq_(last_snapshot, to_date)

        # Items at the temporal beginning and end of
        # the snapshot list should match when specified
        self.api.queue_response(status_code=200, content=datastr)
        from_date, to_date = self.api.align_dates_to_available_snapshots(from_date=first_snapshot, to_date=last_snapshot)
        eq_(first_snapshot, from_date)
        eq_(last_snapshot, to_date)

        # An unmatched from_date should be assigned the date of the previous
        # snapshot (or the first snapshot, if there is not an earlier one).
        # An unmatched to_date should be assigned the date of the next
        # snapshot (or the last snapshot, if there is not a later one).
self.api.queue_response(status_code=200, content=datastr) from_date, to_date = self.api.align_dates_to_available_snapshots(from_date="2016-06-15", to_date="2020-03-22") eq_("2016-06-01", from_date) eq_("2020-03-22", to_date) self.api.queue_response(status_code=200, content=datastr) from_date, to_date = self.api.align_dates_to_available_snapshots(from_date="2016-05-31", to_date="2016-09-02") eq_("2016-05-01", from_date) eq_("2016-10-01", to_date) self.api.queue_response(status_code=200, content=datastr) from_date, to_date = self.api.align_dates_to_available_snapshots(from_date="1960-01-01", to_date="2999-12-31") eq_(first_snapshot, from_date) eq_(last_snapshot, to_date) # date alignment cannot work without at least one snapshot self.api.queue_response(status_code=200, content=u"[]") assert_raises_regexp( BadResponseException, ".*RBDigital available-dates response contains no snapshots.", self.api.align_dates_to_available_snapshots, from_date="2000-02-02", to_date="2000-01-01" ) self.api.queue_response(status_code=200, content=u"[]") assert_raises_regexp( BadResponseException, ".*RBDigital available-dates response contains no snapshots.", self.api.align_dates_to_available_snapshots ) # exception for invalid json self.api.queue_response(status_code=200, content="this is not JSON") assert_raises_regexp( BadResponseException, ".*RBDigital available-dates response not parsable.", self.api.align_dates_to_available_snapshots ) def test_get_delta(self): assert_raises_regexp( ValueError, 'from_date 2000-02-02 cannot be after to_date 2000-01-01.', self.api.get_delta, from_date="2000-02-02", to_date="2000-01-01" ) # The effective begin and end snapshot dates (after availability alignment) # cannot be the same. # This can happen when from_date and to_date from the call were the same # and there is an exact snapshot date match, ... available_dates_string, datadict = self.api.get_data("response_catalog_availability_dates_multi.json") self.api.queue_response(status_code=200, content=available_dates_string) assert_raises_regexp( ValueError, 'The effective begin and end RBDigital catalog snapshot dates cannot be the same.', self.api.get_delta, from_date="2020-04-01", to_date="2020-04-01" ) # but can also occur when: # - both dates are less than the date of the first snapshot, ... self.api.queue_response(status_code=200, content=available_dates_string) assert_raises_regexp( ValueError, 'The effective begin and end RBDigital catalog snapshot dates cannot be the same.', self.api.get_delta, from_date="1960-01-01", to_date="1960-01-02" ) # - both dates are greater than the date of the last snapshot, or ... 
self.api.queue_response(status_code=200, content=available_dates_string) assert_raises_regexp( ValueError, 'The effective begin and end RBDigital catalog snapshot dates cannot be the same.', self.api.get_delta, from_date="2999-12-31", to_date="2999-12-31" ) # - only a single snapshot is available datastr, datadict = self.api.get_data("response_catalog_availability_dates_only_one.json") self.api.queue_response(status_code=200, content=datastr) assert_raises_regexp( ValueError, 'The effective begin and end RBDigital catalog snapshot dates cannot be the same.', self.api.get_delta, from_date="1960-01-01", to_date="2999-12-31" ) self.api.queue_response(status_code=200, content=datastr) assert_raises_regexp( ValueError, 'The effective begin and end RBDigital catalog snapshot dates cannot be the same.', self.api.get_delta ) # Retrieving a delta requires first retrieving a list of dated # snapshots, then retrieving the changes between those dates. datastr, datadict = self.api.get_data("response_catalog_availability_dates_multi.json") self.api.queue_response(status_code=200, content=datastr) datastr, datadict = self.api.get_data("response_catalog_delta.json") self.api.queue_response(status_code=200, content=datastr) delta = self.api.get_delta() eq_(1931, delta["tenantId"]) eq_("2020-03-14", delta["beginDate"]) eq_("2020-04-14", delta["endDate"]) eq_(1, delta["booksAddedCount"]) eq_(1, delta["booksRemovedCount"]) eq_([{u'isbn': u'9781934180723', u'id': 1301944, u'mediaType': u'eAudio'}], delta["addedBooks"]) eq_([{u'isbn': u'9780590543439', u'id': 1031919, u'mediaType': u'eAudio'}], delta["removedBooks"]) def test_patron_remote_identifier_new_patron(self): # End-to-end test of patron_remote_identifier, in the case # where we are able to register the patron. class NeverHeardOfYouAPI(RBDigitalAPI): """A mock RBDigitalAPI that has never heard of any patron and returns a known ID as a way of registering them. """ def patron_remote_identifier_lookup(self, patron): """This API has never heard of any patron.""" return None def create_patron(self, *args, **kwargs): self.called_with = args return "rbdigital internal id" api = NeverHeardOfYouAPI(self._db, self.collection) patron = self.default_patron # If it turns out the API has never heard of a given patron, a # second call is made to create_patron(). eq_("rbdigital internal id", api.patron_remote_identifier(patron)) library, authorization_identifier, email_address = api.called_with # A permanent Credential has been created for the remote # identifier. self._assert_patron_has_remote_identifier_credential( patron, "rbdigital internal id" ) # The patron's library and authorization identifier were passed # into create_patron. eq_(patron.library, library) eq_(patron.authorization_identifier, authorization_identifier) # We didn't set up the patron with a fake email address, # so we weren't able to find anything and no email address # was passed into create_patron. eq_(None, email_address) def test_patron_remote_identifier_existing_patron(self): # End-to-end test of patron_remote_identifier, in the case # where we already know the patron's internal RBdigital ID. class IKnowYouAPI(RBDigitalAPI): """A mock RBDigitalAPI that has heard of any given patron but will refuse to register a new patron. 
""" def patron_remote_identifier_lookup(self, patron): return "i know you" def create_patron(self, *args, **kwargs): raise Exception("No new patrons!") api = IKnowYouAPI(self._db, self.collection) patron = self.default_patron # If it turns out the API has heard of a given patron, no call # is made to create_patron() -- if it happened here the test # would explode. eq_("i know you", api.patron_remote_identifier(patron)) # A permanent Credential has been created for the remote # identifier. self._assert_patron_has_remote_identifier_credential( patron, "i know you" ) def test_patron_remote_identifier(self): # Mocked-up test of patron_remote_identifier, as opposed to # the tests above, which mock only the methods that would # access the RBdigital API. class Mock(MockRBDigitalAPI): called_with = None def _find_or_create_remote_account(self, patron): if self.called_with: raise Exception("I was already called!") self.called_with = patron return "rbdigital internal id" # The first time we call patron_remote_identifier, # _find_or_create_remote_account is called, and the result is # associated with a Credential for the patron. api = Mock(self._db, self.collection, base_path=self.base_path) patron = self._patron() eq_("rbdigital internal id", api.patron_remote_identifier(patron)) self._assert_patron_has_remote_identifier_credential( patron, "rbdigital internal id" ) eq_(patron, api.called_with) # The second time, _find_or_create_remove_account is _not_ # called -- calling the mock method again would raise an # exception. Instead, the cached Credential is returned. eq_("rbdigital internal id", api.patron_remote_identifier(patron)) def test__find_or_create_remote_account(self): # If the remote lookup succeeds (because the patron already # made an account using their barcode), create_patron() is not # called. class RemoteLookupSucceeds(MockRBDigitalAPI): def patron_remote_identifier_lookup(self, identifier): self.patron_remote_identifier_lookup_called_with = identifier return "an internal ID" def create_patron(self): raise Exception("I'll never be called.") api = RemoteLookupSucceeds( self._db, self.collection, base_path=self.base_path ) patron = self._patron("a barcode") patron.authorization_identifier = "a barcode" eq_("an internal ID", api._find_or_create_remote_account(patron)) eq_("a barcode", api.patron_remote_identifier_lookup_called_with) # If the remote lookup fails, create_patron() is called # with the patron's library, authorization identifier, and # email address. 
        class RemoteLookupFails(MockRBDigitalAPI):
            def patron_remote_identifier_lookup(self, identifier):
                self.patron_remote_identifier_lookup_called_with = identifier
                return None

            def create_patron(self, *args, **kwargs):
                self.create_patron_called_with_args = args
                self.create_patron_called_with_kwargs = kwargs
                return "an internal ID"

            def patron_email_address(self, patron):
                self.patron_email_address_called_with = patron
                return "mock email address"

        api = RemoteLookupFails(
            self._db, self.collection, base_path=self.base_path
        )
        eq_("an internal ID", api._find_or_create_remote_account(patron))
        eq_("a barcode", api.patron_remote_identifier_lookup_called_with)
        eq_(patron, api.patron_email_address_called_with)
        eq_((patron.library, patron.authorization_identifier, "mock email address"),
            api.create_patron_called_with_args)

        # The only keyword argument passed to create_patron() should be
        # `bearer_token_handler`.
        eq_(["bearer_token_handler"],
            sorted(api.create_patron_called_with_kwargs.keys()))

        # If the remote lookup fails and create_patron() fails with a
        # RemotePatronCreationFailedException, we try a patron lookup
        # with the email address instead.
        class RemoteLookupFailAndRecovery(MockRBDigitalAPI):
            patron_remote_identifier_lookup_called_with = []
            patron_email_address_called_with = []

            def patron_remote_identifier_lookup(self, identifier):
                self.patron_remote_identifier_lookup_called_with.append(identifier)
                if len(self.patron_remote_identifier_lookup_called_with) == 1:
                    return None
                else:
                    return "an internal ID"

            def create_patron(self, *args, **kwargs):
                raise RemotePatronCreationFailedException

            def patron_email_address(self, patron):
                self.patron_email_address_called_with.append(patron)
                return "mock email address"

        api = RemoteLookupFailAndRecovery(
            self._db, self.collection, base_path=self.base_path
        )
        eq_("an internal ID", api._find_or_create_remote_account(patron))
        eq_(["a barcode", "mock email address"],
            api.patron_remote_identifier_lookup_called_with)
        eq_([patron, patron], api.patron_email_address_called_with)

        # If the remote lookup fails and create_patron() fails with a
        # RemotePatronCreationFailedException, we try a patron lookup
        # with the email address instead; but if that fails as well,
        # we just pass on the exception.
        class RemoteLookupFailAndRecoveryAndFail(MockRBDigitalAPI):
            patron_remote_identifier_lookup_called_with = []
            patron_email_address_called_with = []

            def patron_remote_identifier_lookup(self, identifier):
                self.patron_remote_identifier_lookup_called_with.append(identifier)
                return None

            def create_patron(self, *args, **kwargs):
                raise RemotePatronCreationFailedException

            def patron_email_address(self, patron):
                self.patron_email_address_called_with.append(patron)
                return "mock email address"

        api = RemoteLookupFailAndRecoveryAndFail(
            self._db, self.collection, base_path=self.base_path
        )
        assert_raises(RemotePatronCreationFailedException,
                      api._find_or_create_remote_account, patron)
        eq_(["a barcode", "mock email address"],
            api.patron_remote_identifier_lookup_called_with)
        eq_([patron, patron], api.patron_email_address_called_with)

    def test_create_patron(self):
        # Test the method that creates an RBdigital account for a
        # library patron.
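        # Two cases are exercised below: a 201 response yields the new
        # internal patron ID, and a 409 ("account already exists")
        # surfaces as a RemotePatronCreationFailedException.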
        class Mock(MockRBDigitalAPI):
            def _create_patron_body(
                    self, library, authorization_identifier, email_address
            ):
                self.called_with = (
                    library, authorization_identifier, email_address
                )
        api = Mock(self._db, self.collection, base_path=self.base_path)

        # Test the case where the patron can be created.
        datastr, datadict = api.get_data(
            "response_patron_create_success.json"
        )
        api.queue_response(status_code=201, content=datastr)
        args = "library", "auth", "email"
        patron_rbdigital_id = api.create_patron(*args)

        # The arguments we passed in were propagated to _create_patron_body.
        eq_(args, api.called_with)

        # The return value is the internal ID RBdigital established for this
        # patron.
        eq_(940000, patron_rbdigital_id)

        # Test the case where the patron already exists.
        datastr, datadict = api.get_data("response_patron_create_fail_already_exists.json")
        api.queue_response(status_code=409, content=datastr)
        assert_raises_regexp(
            RemotePatronCreationFailedException, 'create_patron: http=409, response={"message":"A patron account with the specified username, email address, or card number already exists for this library."}',
            api.create_patron, *args
        )

    def test__find_or_create_create_patron_caches_bearer_token(self):
        # Test that the method that creates an RBDigital account caches
        # the patron bearer token, when it is returned in the response.
        class MockAPI(MockRBDigitalAPI):
            # Simulate no existing RBdigital account ...
            def patron_remote_identifier_lookup(self, *args, **kwargs):
                return None
            # ... and an email address is needed for the account
            # creation request.
            def dummy_email_address(self, library, authorization_identifier):
                return 'fake_email'

        api = MockAPI(self._db, self.collection, base_path=self.base_path)
        patron = self._patron("a barcode")
        patron.authorization_identifier = "a barcode"

        # Create the patron and ensure that the bearer token credential has
        # been created.
        datastr, datadict = api.get_data(
            "response_patron_create_success.json"
        )
        api.queue_response(status_code=201, content=datastr)
        expected_bearer_token = datadict['bearer']
        expected_patron_rbd_id = datadict['patron']['patronId']

        # Call the method
        patron_rbdigital_id = api._find_or_create_remote_account(patron)
        [credential] = patron.credentials

        # Should return the RBdigital `patronId` property from the response.
        eq_(expected_patron_rbd_id, patron_rbdigital_id)
        # And we should have a credential with the bearer token.
        eq_(expected_bearer_token, credential.credential)
        eq_(api.CREDENTIAL_TYPES[api.BEARER_TOKEN_PROPERTY]['label'], credential.type)
        eq_(DataSource.RB_DIGITAL, credential.data_source.name)
        eq_(self.collection.id, credential.collection_id)
        assert credential.expires is not None

    def test_patron_remote_identifier_exception(self):
        # Make sure that, if there is an exception while creating the
        # patron, we don't create empty credentials in the database.
        class ApiThrowsException(MockRBDigitalAPI):
            def _find_or_create_remote_account(self, patron):
                raise CirculationException
        patron = self._patron("a barcode")
        api = ApiThrowsException(self._db, self.collection, base_path=self.base_path)
        assert_raises(CirculationException, api.patron_remote_identifier, patron)
        data_source = DataSource.lookup(self._db, DataSource.RB_DIGITAL)
        credential, new = get_one_or_create(
            self._db, Credential, data_source=data_source,
            type=Credential.IDENTIFIER_FROM_REMOTE_SERVICE,
            patron=patron, collection=api.collection
        )
        eq_(True, new)

    def test__create_patron_body(self):
        # Test the method that builds the data (possibly fake, possibly not)
        # for an RBdigital patron creation call.
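        # The body is a dict shaped roughly like this (values taken from
        # the assertions below; the password is random, 16 characters):
        #     {'userName': ..., 'libraryCard': ..., 'email': ...,
        #      'firstName': 'Library', 'lastName': 'Simplified',
        #      'postalCode': '11111', 'libraryId': api.library_id,
        #      'password': <16 random characters>}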
class Mock(MockRBDigitalAPI): dummy_patron_identifier_called_with = None dummy_email_address_called_with = None def dummy_patron_identifier(self, authorization_identifier): self.dummy_patron_identifier_called_with = ( authorization_identifier ) return "dummyid" def dummy_email_address(self, library, authorization_identifier): self.dummy_email_address_called_with = ( library, authorization_identifier ) return "dummy@email" api = Mock(self._db, self.collection, base_path=self.base_path) # Test the case where a 'real' email address is provided. library = object() identifier = "auth_identifier" email = "me@email" body = api._create_patron_body(library, identifier, email) # We can't test the password, even by seeding the random # number generator, because it's generated with os.urandom(), # but we can verify that it's the right length. password = body.pop("password") eq_(16, len(password)) # And we can directly check every other value. expect = { 'userName': identifier, 'firstName': 'Library', 'libraryCard': identifier, 'lastName': 'Simplified', 'postalCode': '11111', 'libraryId': api.library_id, 'email': email } eq_(expect, body) # dummy_patron_identifier and dummy_email_address were not called, # since we're able to create an RBdigital account that the patron # can use through other means. eq_(None, api.dummy_patron_identifier_called_with) eq_(None, api.dummy_email_address_called_with) # Test the case where no 'real' email address is provided. body = api._create_patron_body(library, identifier, None) body.pop("password") expect = { 'userName': 'dummyid', 'firstName': 'Library', 'libraryCard': 'dummyid', 'lastName': 'Simplified', 'postalCode': '11111', 'libraryId': api.library_id, 'email': 'dummy@email' } eq_(expect, body) # dummy_patron_identifier and dummy_email_address were called. eq_(identifier, api.dummy_patron_identifier_called_with) eq_((library, identifier), api.dummy_email_address_called_with) def test_dummy_patron_identifier(self): random.seed(42) patron = self.default_patron auth = patron.authorization_identifier remote_auth = self.api.dummy_patron_identifier(auth) # The dummy identifier is the input identifier plus # 6 random characters. eq_(auth + "N098QO", remote_auth) # It's different every time. remote_auth = self.api.dummy_patron_identifier(auth) eq_(auth + "W3F17I", remote_auth) def test_dummy_email_address(self): patron = self.default_patron library = patron.library auth = patron.authorization_identifier m = self.api.dummy_email_address # Without a setting for DEFAULT_NOTIFICATION_EMAIL_ADDRESS, we # can't calculate the email address to send RBdigital for a # patron. assert_raises_regexp( RemotePatronCreationFailedException, "Cannot create remote account for patron because library's default notification address is not set.", m, patron, auth ) self._set_notification_address(patron.library) address = m(patron, auth) eq_("genericemail+rbdigital-%s@library.org" % auth, address) def test_patron_remote_identifier_lookup(self): # Test the method that tries to convert a patron identifier # (e.g. the one the patron uses to authenticate with their # library) to an internal RBdigital patron ID. m = self.api.patron_remote_identifier_lookup identifier = self._str # Test the case where RBdigital doesn't recognize the identifier # we're using. 
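        # (A sketch of the mapping exercised below: a "not found" payload
        # maps to None, a successful lookup yields the integer patron ID,
        # and a 500 error surfaces as an InvalidInputException.)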
datastr, datadict = self.api.get_data( "response_patron_internal_id_not_found.json" ) self.api.queue_response(status_code=200, content=datastr) rbdigital_patron_id = m(identifier) eq_(None, rbdigital_patron_id) # Test the case where RBdigital recognizes the identifier # we're using. self.queue_initial_patron_id_lookup() rbdigital_patron_id = m(identifier) eq_(939981, rbdigital_patron_id) # Test the case where RBdigital sends an error because it # doesn't like our input. datastr, datadict = self.api.get_data( "response_patron_internal_id_error.json" ) self.api.queue_response(status_code=500, content=datastr) assert_raises_regexp( InvalidInputException, "patron_id:", m, identifier ) def test_get_ebook_availability_info(self): datastr, datadict = self.api.get_data("response_availability_ebook_1.json") self.api.queue_response(status_code=200, content=datastr) response_list = self.api.get_ebook_availability_info() eq_(u'9781420128567', response_list[0]['isbn']) eq_(False, response_list[0]['availability']) def test_get_metadata_by_isbn(self): datastr, datadict = self.api.get_data("response_isbn_notfound_1.json") self.api.queue_response(status_code=200, content=datastr) response_dictionary = self.api.get_metadata_by_isbn('97BADISBNFAKE') eq_(None, response_dictionary) self.api.queue_response(status_code=404, content="{}") assert_raises_regexp( BadResponseException, "Bad response from .*", self.api.get_metadata_by_isbn, identifier='97BADISBNFAKE' ) datastr, datadict = self.api.get_data("response_isbn_found_1.json") self.api.queue_response(status_code=200, content=datastr) response_dictionary = self.api.get_metadata_by_isbn('9780307378101') eq_(u'9780307378101', response_dictionary['isbn']) eq_(u'Anchor', response_dictionary['publisher']) def test_populate_all_catalog(self): # Test the method that retrieves the entire catalog from RBdigital # and mirrors it locally. datastr, datadict = self.get_data("response_catalog_all_sample.json") self.api.queue_response(status_code=200, content=datastr) result = self.api.populate_all_catalog() # populate_all_catalog returns two numbers, as required by # RBDigitalSyncMonitor. eq_((3, 3), result) # We created three presentation-ready works. works = sorted( self._db.query(Work).all(), key=lambda x: x.title ) emperor, russian, tricks = works eq_("Emperor Mage: The Immortals", emperor.title) eq_("In-Flight Russian", russian.title) eq_("Tricks", tricks.title) eq_(["9781934180723", "9781400024018", "9781615730186"], [x.license_pools[0].identifier.identifier for x in works]) for w in works: [pool] = w.license_pools # We know we own licenses for this book. eq_(1, pool.licenses_owned) # We _presume_ that this book is lendable. We may find out # differently the next time we run the availability # monitor. eq_(1, pool.licenses_available) def test_populate_delta(self): # A title we don't know about -- "Emperor Mage: The Immortals" # is about to be added to the collection. # This title ("Greatest: Muhammad Ali, The") is about to be # removed from the collection. ali, ignore = LicensePool.for_foreign_id( self._db, DataSource.RB_DIGITAL, Identifier.RB_DIGITAL_ID, "9780590543439", collection=self.collection ) ali.licenses_owned = 10 ali.licenses_available = 9 ali.licenses_reserved = 2 ali.patrons_in_hold_queue = 1 # This title ("Tricks") is not mentioned in the delta, so it # will be left alone. 
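        # Delta semantics, as asserted below: added ISBNs gain new
        # LicensePools (availability is filled in later by
        # process_availability), removed ISBNs have their license counts
        # zeroed, and unmentioned ISBNs are left untouched.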
        tricks, ignore = LicensePool.for_foreign_id(
            self._db, DataSource.RB_DIGITAL, Identifier.RB_DIGITAL_ID,
            "9781615730186", collection=self.collection
        )
        tricks.licenses_owned = 10
        tricks.licenses_available = 5

        # Retrieving a delta requires first retrieving a list of dated
        # snapshots, then retrieving the changes between those dates.
        datastr, datadict = self.get_data("response_catalog_availability_dates_multi.json")
        self.api.queue_response(status_code=200, content=datastr)
        datastr, datadict = self.get_data("response_catalog_delta.json")
        self.api.queue_response(status_code=200, content=datastr)

        # RBDigitalAPI.populate_delta then retrieves a complete media entry
        # for the ISBN of each added item. This is not needed for removals.
        datastr, datadict = self.get_data("response_catalog_media_isbn.json")
        self.api.queue_response(status_code=200, content=datastr)

        result = self.api.populate_delta(
            today=datetime.datetime(2020, 4, 30)
        )

        # populate_delta returns two numbers, as required by
        # RBDigitalSyncMonitor.
        eq_((2, 2), result)

        # "Tricks" has not been modified.
        eq_(10, tricks.licenses_owned)
        eq_(5, tricks.licenses_available)

        # "Greatest: Muhammad Ali, The" is still known to the system,
        # but its circulation data has been updated to indicate the
        # fact that this collection has no licenses.
        eq_(0, ali.licenses_owned)
        eq_(0, ali.licenses_available)
        eq_(0, ali.licenses_reserved)
        eq_(0, ali.patrons_in_hold_queue)

        # "Emperor Mage: The Immortals" is now known to the system.
        emperor, ignore = LicensePool.for_foreign_id(
            self._db, DataSource.RB_DIGITAL, Identifier.RB_DIGITAL_ID,
            "9781934180723", collection=self.collection
        )
        work = emperor.work
        eq_("Emperor Mage", work.title)
        eq_(True, work.presentation_ready)

        # However, we have not set availability information on this
        # title. That will happen (for all titles) the next time
        # RBDigitalAPI.process_availability is called.
        eq_(0, emperor.licenses_owned)
        eq_(0, emperor.licenses_available)

    def test_populate_delta_remove_item_missing_metadata(self):
        item_media_str, item_media = self.get_data("response_catalog_media_isbn.json")
        _, add_remove_same_delta = self.get_data("response_catalog_delta.json")
        add_remove_same_delta["addedBooks"] = [
            {
                "id": 1301944,
                "isbn": item_media["isbn"],
                "mediaType": item_media["mediaType"]
            }
        ]
        add_remove_same_delta["booksAddedCount"] = 1
        add_remove_same_delta["removedBooks"] = add_remove_same_delta["addedBooks"]
        add_remove_same_delta["booksRemovedCount"] = add_remove_same_delta["booksAddedCount"]
        # ensure test conditions are valid
        eq_(item_media["isbn"], add_remove_same_delta["addedBooks"][0]["isbn"])
        eq_(item_media["isbn"], add_remove_same_delta["removedBooks"][0]["isbn"])
        eq_(1, len(add_remove_same_delta["removedBooks"]))
        add_remove_same_delta["addedBooks"] = add_remove_same_delta["removedBooks"]
        add_remove_same_delta["booksAddedCount"] = add_remove_same_delta["booksRemovedCount"]

        delta_no_remove_isbn = json.loads(json.dumps(add_remove_same_delta))
        _ = delta_no_remove_isbn["removedBooks"][0].pop("isbn")

        class GoodMetaRBDigitalAPI(MockRBDigitalAPI):
            def get_delta(self, *args, **kwargs):
                return add_remove_same_delta

        api = GoodMetaRBDigitalAPI(self._db, self.collection, base_path=self.base_path)
        api.queue_response(status_code=200, content=item_media_str)
        items_transmitted, items_updated = api.populate_delta()
        eq_(2, items_transmitted)
        eq_(2, items_updated)

        # Exercise RBDigitalAPI.populate_delta when attempting to
        # remove an item with no metadata.
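        # A removal record that lacks an "isbn" cannot be matched to a
        # LicensePool, so only one of the two transmitted items below is
        # actually updated.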
class NoneMetaRBDigitalAPI(MockRBDigitalAPI): def get_delta(self, *args, **kwargs): return delta_no_remove_isbn api = NoneMetaRBDigitalAPI(self._db, self.collection, base_path=self.base_path) api.queue_response(status_code=200, content=item_media_str) items_transmitted, items_updated = api.populate_delta() eq_(2, items_transmitted) eq_(1, items_updated) def test_circulate_item(self): edition, pool = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, identifier_id = '9781441260468' ) datastr, datadict = self.api.get_data("response_checkout_success.json") self.api.queue_response(status_code=200, content=datastr) patron = self.default_patron # We don't need to go through the process of establishing this # patron's RBdigital ID -- just make one up. rbdigital_id = self._str # borrow functionality checks response_dictionary = self.api.circulate_item(rbdigital_id, edition.primary_identifier.identifier) assert('error_code' not in response_dictionary) eq_("9781441260468", response_dictionary['isbn']) eq_("SUCCESS", response_dictionary['output']) eq_(False, response_dictionary['canRenew']) #eq_(9828517, response_dictionary['transactionId']) eq_(939981, response_dictionary['patronId']) eq_(1931, response_dictionary['libraryId']) request_url, request_args, request_kwargs = self.api.requests[-1] assert "checkouts" in request_url eq_("post", request_kwargs.get("method")) datastr, datadict = self.api.get_data("response_checkout_unavailable.json") self.api.queue_response(status_code=409, content=datastr) assert_raises_regexp( NoAvailableCopies, "Title is not available for checkout", self.api.circulate_item, rbdigital_id, edition.primary_identifier.identifier ) request_url, request_args, request_kwargs = self.api.requests[-1] assert "checkouts" in request_url eq_("post", request_kwargs.get("method")) # book return functionality checks self.api.queue_response(status_code=200, content="") response_dictionary = self.api.circulate_item(rbdigital_id, edition.primary_identifier.identifier, return_item=True) eq_({}, response_dictionary) request_url, request_args, request_kwargs = self.api.requests[-1] assert "checkouts" in request_url eq_("delete", request_kwargs.get("method")) datastr, datadict = self.api.get_data("response_return_unavailable.json") self.api.queue_response(status_code=409, content=datastr) assert_raises_regexp( NotCheckedOut, "checkin:", self.api.circulate_item, rbdigital_id, edition.primary_identifier.identifier, return_item=True ) request_url, request_args, request_kwargs = self.api.requests[-1] assert "checkouts" in request_url eq_("delete", request_kwargs.get("method")) # hold functionality checks datastr, datadict = self.api.get_data("response_patron_hold_success.json") self.api.queue_response(status_code=200, content=datastr) response = self.api.circulate_item(rbdigital_id, edition.primary_identifier.identifier, hold=True) eq_(9828560, response) request_url, request_args, request_kwargs = self.api.requests[-1] assert "holds" in request_url eq_("post", request_kwargs.get("method")) datastr, datadict = self.api.get_data("response_patron_hold_fail_409_reached_limit.json") self.api.queue_response(status_code=409, content=datastr) response = self.api.circulate_item(rbdigital_id, edition.primary_identifier.identifier, hold=True) eq_("You have reached your checkout limit and therefore are unable to place additional holds.", response) request_url, request_args, request_kwargs = self.api.requests[-1] assert "holds" in request_url 
eq_("post", request_kwargs.get("method")) def test_checkin(self): # Returning a book is, for now, more of a "notify RBDigital that we've # returned through Adobe" formality than critical functionality. # There's no information returned from the server on success, so we use a # boolean success flag. patron = self.default_patron self.queue_initial_patron_id_lookup() edition, pool = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, identifier_id = '9781441260468' ) work = self._work(presentation_edition=edition) # queue checkin success self.api.queue_response(status_code=200, content='{"message": "success"}') success = self.api.checkin(patron, None, pool) eq_(True, success) # queue unexpected non-empty response from the server self.api.queue_response(status_code=200, content=json.dumps({"error_code": "error"})) assert_raises(CirculationException, self.api.checkin, patron, None, pool) def test_checkout(self): # Ebooks and audiobooks have different loan durations. ebook_period = self.api.collection.default_loan_period( self._default_library, Edition.BOOK_MEDIUM ) audio_period = self.api.collection.default_loan_period( self._default_library, Edition.AUDIO_MEDIUM ) assert ebook_period != audio_period patron = self.default_patron self.queue_initial_patron_id_lookup() edition, pool = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, identifier_id = '9781441260468' ) work = self._work(presentation_edition=edition) # The second request will actually check out the book. datastr, datadict = self.api.get_data("response_checkout_success.json") self.api.queue_response(status_code=200, content=datastr) loan_info = self.api.checkout(patron, None, pool, None) checkout_url = self.api.requests[-1][0] assert "days=%s" % ebook_period in checkout_url # Now we have a LoanInfo that describes the remote loan. eq_(Identifier.RB_DIGITAL_ID, loan_info.identifier_type) eq_(pool.identifier.identifier, loan_info.identifier) today = datetime.datetime.utcnow() assert (loan_info.start_date - today).total_seconds() < 20 assert (loan_info.end_date - today).days <= ebook_period # But we can only get a FulfillmentInfo by calling # get_patron_checkouts(). eq_(None, loan_info.fulfillment_info) # Try the checkout again but pretend that we're checking out # an audiobook. # edition.medium = Edition.AUDIO_MEDIUM self.api.queue_response(status_code=200, content=datastr) loan_info = self.api.checkout(patron, None, pool, None) # We requested a different loan duration. checkout_url = self.api.requests[-1][0] assert "days=%s" % audio_period in checkout_url assert (loan_info.end_date - today).days <= audio_period def test_fulfill(self): patron = self.default_patron self.queue_initial_patron_id_lookup() identifier = self._identifier( identifier_type=Identifier.RB_DIGITAL_ID, foreign_id='9781426893483') edition, pool = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, identifier_id = '9781426893483' ) # The first request will look up the patron's current loans. 
datastr, datadict = self.api.get_data("response_patron_checkouts_200_list.json") self.api.queue_response(status_code=200, content=datastr) found_fulfillment = self.api.fulfill(patron, None, pool, None) assert isinstance(found_fulfillment, RBFulfillmentInfo) # We have a FulfillmentInfo-like object, but it hasn't yet # made the second request that will give us the actual URL to # download. (We know this, because the response to that # request has not been queued yet.) # We'll need to obtain a patron bearer token for fulfillment # requests, so we'll queue the requisite responses up first. self.queue_fetch_patron_bearer_token() # Let's queue it up now. download_url = u"http://download_url/" epub_manifest = json.dumps({ "url": download_url, "type": Representation.EPUB_MEDIA_TYPE }) self.api.queue_response(status_code=200, content=epub_manifest) # Since the book being fulfilled is an EPUB, the # FulfillmentInfo returned contains a direct link to the EPUB. eq_(Identifier.RB_DIGITAL_ID, found_fulfillment.identifier_type) eq_(u'9781426893483', found_fulfillment.identifier) eq_(download_url, found_fulfillment.content_link) eq_(u'application/epub+zip', found_fulfillment.content_type) eq_(None, found_fulfillment.content) # The fulfillment link expires in about 14 minutes -- rather # than testing this exactly we estimate it. expires = found_fulfillment.content_expires now = datetime.datetime.utcnow() thirteen_minutes = now + datetime.timedelta(minutes=13) fifteen_minutes = now + datetime.timedelta(minutes=15) assert expires > thirteen_minutes assert expires < fifteen_minutes # Here's another pool that the patron doesn't have checked out. edition2, pool2 = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, identifier_id = '123456789' ) # Since the Patron now has a Credential containing their # RBdigital ID, there will be no initial request looking up their # RBdigital ID. # Instead we'll go right to the list of active loans, where we'll # find out that the patron does not have an active loan for the # requested book. datastr, datadict = self.api.get_data("response_patron_checkouts_200_list.json") self.api.queue_response(status_code=200, content=datastr) # The patron can't fulfill the book if it's not one of their checkouts. assert_raises(NoActiveLoan, self.api.fulfill, patron, None, pool2, None) # Try again with a scenario where the patron has no active # loans at all. datastr, datadict = self.api.get_data("response_patron_checkouts_200_emptylist.json") self.api.queue_response(status_code=200, content=datastr) assert_raises(NoActiveLoan, self.api.fulfill, patron, None, pool, None) def test_fulfill_audiobook(self): """Verify that fulfilling an audiobook results in a manifest document. """ patron_bearer_token = 'd1544585ade0abcd7908ba0e' class MockAPI(MockRBDigitalAPI): # We'll need this to match the start of our download URLs PRODUCTION_BASE_URL = 'https://' api = MockAPI( self._db, self.collection, base_path=self.base_path ) patron = self.default_patron self.queue_initial_patron_id_lookup(api=api) audiobook_id = '9781449871789' identifier = self._identifier( identifier_type=Identifier.RB_DIGITAL_ID, foreign_id=audiobook_id) edition, pool = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, identifier_id = audiobook_id ) # The only request we will make will be to look up the # patron's current loans. 
datastr, datadict = api.get_data( "response_patron_checkouts_with_audiobook.json" ) # Save the original parts of this item for later. original_parts = datadict[0]['files'] api.queue_response(status_code=200, content=datastr) def make_part_url(part): return "http://give-me-part/%s" % part # Not fulfilling a part found_fulfillment = api.fulfill( patron, None, pool, None, part=None, fulfill_part_url=make_part_url ) assert isinstance(found_fulfillment, RBFulfillmentInfo) # Now we were able to get a Readium Web Publication manifest for # the loan. We will proxy the links in the manifest. # `RBDigitalFulfillmentProxy.proxied_manifest` will need a patron # bearer token to rewrite the URLs, so we'll queue up the needed # responses before getting the manifest. self.queue_fetch_patron_bearer_token(api=api) eq_(Representation.AUDIOBOOK_MANIFEST_MEDIA_TYPE, found_fulfillment.content_type) # A manifest is associated with the FulfillmentInfo. manifest = found_fulfillment.manifest # The Unicode representation of the manifest is used as the # content to be sent to the client. output = json.loads(found_fulfillment.content) eq_('http://readium.org/webpub/default.jsonld', output['@context']) eq_('http://bib.schema.org/Audiobook', output['metadata']['@type']) # Ensure that we've consumed all of the queued responses so far eq_(0, len(api.responses)) # Each item in the manifest's readingOrder has a download url # generated by calling make_part_url(). # # This represents a reliable (but slower) way of obtaining an # MP3 file directly from the manifest, without having to know # how to process an RBdigital access document. # # NB: The faster way is to obtain the access document directly # from RBdigital, which is how we used to do it. But that now # requires a patron bearer token. This href & type used to be # provided as an alternate to that direct request. for i, part in enumerate(manifest.readingOrder): downloadUrl = original_parts[i]['downloadUrl'] # the expected download URL has the API base URL stripped off expected_downloadUrl = downloadUrl[len(api.PRODUCTION_BASE_URL):] expected_proxied_url = '{}/rbdproxy/{}?{}'.format( make_part_url(i), patron_bearer_token, urllib.urlencode({'url': expected_downloadUrl}) ) eq_(expected_proxied_url, part['href']) eq_("vnd.librarysimplified/rbdigital-access-document+json", part['type']) # Ensure that we've consumed all of the queued responses so far eq_(0, len(api.responses)) # This function will be used to validate the next few # fulfillment requests. def verify_fulfillment(): # We end up with a FulfillmentInfo that includes the link # mentioned in audiobook_chapter_access_document.json. chapter = api.fulfill(patron, None, pool, None, part=3, fulfill_part_url=lambda part: "http://does.not/matter") assert isinstance(chapter, FulfillmentInfo) eq_("http://book/chapter1.mp3", chapter.content_link) eq_("audio/mpeg", chapter.content_type) # We should have a cached bearer token now. And it should be unexpired. data_source = DataSource.lookup(self._db, DataSource.RB_DIGITAL) credential, new = get_one_or_create( self._db, Credential, data_source=data_source, type=api.CREDENTIAL_TYPES[api.BEARER_TOKEN_PROPERTY]['label'], patron=patron, collection=api.collection, ) eq_(False, new) assert credential.expires > datetime.datetime.utcnow() # Ensure that we've consumed all of the queued responses so far eq_(0, len(api.responses)) # Now let's try fulfilling one of those parts. 
# # We're going to make two requests this time -- one to get the # patron's current loans and one to get the RBdigital access # document. # # Before the second request, we'll check the cache for a patron # bearer token, which we need in order to authenticate access # document fulfillment. But we don't have one, so we'll need to # get one from the remote. We'll queue the responses for the # bearer token before the response for the fulfillment request. datastr, datadict = api.get_data("response_patron_checkouts_with_audiobook.json") api.queue_response(status_code=200, content=datastr) datastr, datadict = api.get_data("audiobook_chapter_access_document.json") api.queue_response(status_code=200, content=datastr) # And make sure everything went as expected. verify_fulfillment() # We have a cached bearer token now, so we should be able to make the # same request without queueing the patron bearer token response. datastr, datadict = api.get_data("response_patron_checkouts_with_audiobook.json") api.queue_response(status_code=200, content=datastr) datastr, datadict = api.get_data("audiobook_chapter_access_document.json") api.queue_response(status_code=200, content=datastr) verify_fulfillment() # Now we simulate having an unexpired cached bearer token that the remote # service has invalidated. When we attempt to fulfill the access document, # we receive a 401 response, which leads to requesting a fresh bearer # token. datastr, datadict = api.get_data("response_patron_checkouts_with_audiobook.json") api.queue_response(status_code=200, content=datastr) datastr, datadict = api.get_data("response_fullfillment_401_invalid_bearer_token.json") api.queue_response(status_code=401, content=datastr) self.queue_fetch_patron_bearer_token(api=api) datastr, datadict = api.get_data("audiobook_chapter_access_document.json") api.queue_response(status_code=200, content=datastr) verify_fulfillment() def test_patron_activity(self): # Get patron's current checkouts and holds. # Make sure LoanInfo objects were created and filled # with FulfillmentInfo objects. Make sure HoldInfo objects # were created. patron = self.default_patron self.queue_initial_patron_id_lookup() identifier = self._identifier( identifier_type=Identifier.RB_DIGITAL_ID, foreign_id='9781456103859') identifier = self._identifier( identifier_type=Identifier.RB_DIGITAL_ID, foreign_id='9781426893483') # queue checkouts list datastr, datadict = self.api.get_data("response_patron_checkouts_200_list.json") self.api.queue_response(status_code=200, content=datastr) # queue holds list datastr, datadict = self.api.get_data("response_patron_holds_200_list.json") self.api.queue_response(status_code=200, content=datastr) patron_activity = self.api.patron_activity(patron, None) eq_(Identifier.RB_DIGITAL_ID, patron_activity[0].identifier_type) eq_(u'9781456103859', patron_activity[0].identifier) eq_(None, patron_activity[0].start_date) eq_(datetime.date(2016, 11, 19), patron_activity[0].end_date) eq_(Identifier.RB_DIGITAL_ID, patron_activity[1].identifier_type) eq_(u'9781426893483', patron_activity[1].identifier) eq_(None, patron_activity[1].start_date) eq_(datetime.date(2016, 11, 19), patron_activity[1].end_date) eq_(Identifier.RB_DIGITAL_ID, patron_activity[2].identifier_type) eq_('9781426893483', patron_activity[2].identifier) eq_(None, patron_activity[2].start_date) eq_(datetime.date(2050, 12, 31), patron_activity[2].end_date) eq_(None, patron_activity[2].hold_position) def test_place_hold(self): "Test reserving a book." 
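        # The lower-level hold plumbing is exercised in test_circulate_item
        # above; here we check how RBdigital hold responses are translated
        # into HoldInfo or CannotHold.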
patron = self.default_patron self.queue_initial_patron_id_lookup() edition, pool = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, identifier_id = '9781441260468' ) # If the book is already on hold or already checked out, # CannotHold is raised. (It's not AlreadyOnHold/AlreadyCheckedOut # because we can't distinguish between the two cases.) datastr, datadict = self.api.get_data("response_patron_hold_fail_409_already_exists.json") self.api.queue_response(status_code=409, content=datastr) assert_raises_regexp( CannotHold, ".*Hold or Checkout already exists.", self.api.place_hold, patron, None, pool, None ) # If the patron has reached a limit and cannot place any more holds, # CannotHold is raised. datastr, datadict = self.api.get_data("response_patron_hold_fail_409_reached_limit.json") self.api.queue_response(status_code=409, content=datastr) assert_raises_regexp( CannotHold, ".*You have reached your checkout limit and therefore are unable to place additional holds.", self.api.place_hold, patron, None, pool, None ) # Finally let's test a successful hold. datastr, datadict = self.api.get_data("response_patron_hold_success.json") self.api.queue_response(status_code=200, content=datastr) hold_info = self.api.place_hold(patron, None, pool, None) eq_(Identifier.RB_DIGITAL_ID, hold_info.identifier_type) eq_(pool.identifier.identifier, hold_info.identifier) today = datetime.datetime.now() assert (hold_info.start_date - today).total_seconds() < 20 def test_release_hold(self): "Test releasing a book reservation early." patron = self.default_patron self.queue_initial_patron_id_lookup() edition, pool = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, identifier_id = '9781441260468' ) # queue release success self.api.queue_response(status_code=200, content='{"message": "success"}') success = self.api.release_hold(patron, None, pool) eq_(True, success) # queue unexpected non-empty response from the server self.api.queue_response(status_code=200, content=json.dumps({"error_code": "error"})) assert_raises(CirculationException, self.api.release_hold, patron, None, pool) def test_update_licensepool_for_identifier(self): """Test the RBDigital implementation of the update_availability method defined by the CirculationAPI interface. """ # Update a LicensePool that doesn't exist yet, and it gets created. identifier = self._identifier(identifier_type=Identifier.RB_DIGITAL_ID) isbn = identifier.identifier.encode("ascii") # The BibliographicCoverageProvider gets called for a new license pool. self.api.queue_response(200, content=json.dumps({})) pool, is_new, circulation_changed = self.api.update_licensepool_for_identifier( isbn, True, 'ebook' ) eq_(True, is_new) eq_(True, circulation_changed) eq_(1, pool.licenses_owned) eq_(1, pool.licenses_available) [lpdm] = pool.delivery_mechanisms eq_(Representation.EPUB_MEDIA_TYPE, lpdm.delivery_mechanism.content_type) eq_(DeliveryMechanism.ADOBE_DRM, lpdm.delivery_mechanism.drm_scheme) # Create a LicensePool that needs updating. edition, pool = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, collection=self.collection ) # We have never checked the circulation information for this # LicensePool. Put some random junk in the pool to verify # that it gets changed. 
pool.licenses_owned = 5 pool.licenses_available = 3 pool.patrons_in_hold_queue = 3 eq_(None, pool.last_checked) isbn = pool.identifier.identifier.encode("ascii") pool, is_new, circulation_changed = self.api.update_licensepool_for_identifier( isbn, False, 'eaudio' ) eq_(False, is_new) eq_(True, circulation_changed) # The availability information has been updated, as has the # date the availability information was last checked. # # We still own a license, but it's no longer available for # checkout. eq_(1, pool.licenses_owned) eq_(0, pool.licenses_available) eq_(3, pool.patrons_in_hold_queue) assert pool.last_checked is not None # A delivery mechanism was also added to the pool. [lpdm] = pool.delivery_mechanisms eq_(Representation.AUDIOBOOK_MANIFEST_MEDIA_TYPE, lpdm.delivery_mechanism.content_type) eq_(None, lpdm.delivery_mechanism.drm_scheme) self.api.update_licensepool_for_identifier(isbn, True, 'ebook') eq_(1, pool.licenses_owned) eq_(1, pool.licenses_available) eq_(3, pool.patrons_in_hold_queue) class TestCirculationMonitor(RBDigitalAPITest): def test_run_once(self): # run_once() calls process_availability twice, once for # ebooks and once for audiobooks. class Mock(RBDigitalCirculationMonitor): process_availability_calls = [] def process_availability(self, media_type): self.process_availability_calls.append(media_type) # Pretend we processed three titles. return 3 monitor = Mock( self._db, self.collection, api_class=MockRBDigitalAPI, ) timestamp = monitor.timestamp().to_data() progress = monitor.run_once(timestamp) eq_(['eBook', 'eAudio'], monitor.process_availability_calls) # The TimestampData returned by run_once() describes its # achievements. eq_("Ebooks processed: 3. Audiobooks processed: 3.", progress.achievements) # The TimestampData does not include any timing information # -- that will be applied by run(). eq_(None, progress.start) eq_(None, progress.finish) def test_process_availability(self): monitor = RBDigitalCirculationMonitor( self._db, self.collection, api_class=MockRBDigitalAPI, api_class_kwargs=dict(base_path=self.base_path) ) eq_(ExternalIntegration.RB_DIGITAL, monitor.protocol) # Create a LicensePool that needs updating. edition_ebook, pool_ebook = self._edition( identifier_type=Identifier.RB_DIGITAL_ID, data_source_name=DataSource.RB_DIGITAL, with_license_pool=True, collection=self.collection ) pool_ebook.licenses_owned = 3 pool_ebook.licenses_available = 2 pool_ebook.patrons_in_hold_queue = 1 eq_(None, pool_ebook.last_checked) # Prepare availability information. datastr, datadict = monitor.api.get_data("response_availability_single_ebook.json") # Modify the data so that it appears to be talking about the # book we just created. 
        new_identifier = pool_ebook.identifier.identifier.encode("ascii")
        datastr = datastr.replace("9781781107041", new_identifier)
        monitor.api.queue_response(status_code=200, content=datastr)
        item_count = monitor.process_availability()
        eq_(1, item_count)
        pool_ebook.licenses_available = 0

class TestRBFulfillmentInfo(RBDigitalAPITest):

    def test_fulfill_part(self):
        get_data = self.api.get_data
        ignore, [book] = get_data(
            "response_patron_checkouts_with_audiobook.json"
        )
        proxied_cm_part_url = "a-proxy-url"
        manifest = AudiobookManifest(book, fulfill_part_url=lambda part: proxied_cm_part_url)
        part_files = book['files']

        class MockFulfillmentRequestTracker(object):
            def fulfillment_request(self, url):
                self.fulfillment_request_last_called_with = url
                data, ignore = get_data(
                    "audiobook_chapter_access_document.json"
                )
                return MockRequestsResponse(200, {}, data)

        # We have an RBFulfillmentInfo object and the underlying
        # AudiobookManifest has already been created.
        # When we're fulfilling a part, we need our manifest to
        # get the real -- not proxy -- fulfillment URLs.
        fulfill_part_url = object()
        fulfillment_proxy = RBDigitalFulfillmentProxy(self._patron(), api=self.api, for_part=None)
        request_tracker = MockFulfillmentRequestTracker()
        info = RBFulfillmentInfo(
            fulfill_part_url, request_tracker.fulfillment_request,
            self.api,
            "data source",
            "identifier type",
            "identifier",
            "key",
            fulfillment_proxy=fulfillment_proxy,
        )

        # We won't be using fulfill_part_url, since it's only used
        # when we're fulfilling the audiobook as a whole, but let's
        # check to make sure it was set correctly.
        eq_(fulfill_part_url, info.fulfill_part_url)

        # Prepopulate the manifest so that we don't go over the
        # network trying to get it.
        info._fetched = True
        info.manifest = manifest

        # Now we're going to try various situations where partial
        # fulfillment is impossible. Each one will raise
        # CannotPartiallyFulfill.
        m = info.fulfill_part

        # Any part number will do here; the content type is checked
        # before the part number is validated.
        info._content_type = "not/an/audiobook"
        assert_raises_regexp(
            CannotPartiallyFulfill,
            "This work does not support partial fulfillment.",
            m, 0
        )

        info._content_type = Representation.AUDIOBOOK_MANIFEST_MEDIA_TYPE
        assert_raises_regexp(
            CannotPartiallyFulfill,
            '"not a number" is not a valid part number', m,
            "not a number"
        )

        assert_raises_regexp(
            CannotPartiallyFulfill,
            'Could not locate part number -1', m,
            -1
        )

        # There are 21 parts in this audiobook, numbered from 0 to 20.
        assert_raises_regexp(
            CannotPartiallyFulfill,
            'Could not locate part number 21', m,
            len(manifest.readingOrder)
        )

        # Finally, let's fulfill a part that does exist.
        part_index = 10
        fulfillment = m(part_index)
        assert isinstance(fulfillment, FulfillmentInfo)

        # Fulfillment should be requested with the real downloadUrl,
        # not the proxy URL.
        assert proxied_cm_part_url != request_tracker.fulfillment_request_last_called_with
        eq_("https://download-piece/{}".format(part_index + 1),
            request_tracker.fulfillment_request_last_called_with)
        eq_("http://book/chapter1.mp3", fulfillment.content_link)
        eq_("audio/mpeg", fulfillment.content_type)

class TestAudiobookManifest(RBDigitalAPITest):

    def test_constructor(self):
        """A reasonable RBdigital manifest becomes a reasonable AudiobookManifest object.
""" patron_bearer_token = 'd1544585ade0abcd7908ba0e' class MockAPI(MockRBDigitalAPI): # We'll need this to match the start of our download URLs PRODUCTION_BASE_URL = 'https://' def fetch_patron_bearer_token(self, patron): return patron_bearer_token api = MockAPI( self._db, self.collection, base_path=self.base_path ) def fulfill_part_url(part): return "http://fulfill-part/%s" % part ignore, [book] = api.get_data( "response_patron_checkouts_with_audiobook.json" ) # If we don't pass in a `fulfill_part_url` function, then # a CM-proxied access doc URL will not be generated. Now # that clients cannot directly retrieve the access document # from the primary downloadUrl, not providing a function to # generate this alternative is treated as an error. assert_raises_regexp( TypeError, "__init__\(\) takes exactly .* arguments .*", AudiobookManifest, book ) manifest = AudiobookManifest(book, fulfill_part_url) # We know about a lot of metadata. eq_('http://bib.schema.org/Audiobook', manifest.metadata['@type']) eq_(u'Sharyn McCrumb', manifest.metadata['author']) eq_(u'Award-winning, New York Times best-selling novelist Sharyn McCrumb crafts absorbing, lyrical tales featuring the rich culture and lore of Appalachia. In the compelling...', manifest.metadata['description']) eq_(52710.0, manifest.metadata['duration']) eq_(u'9781449871789', manifest.metadata['identifier']) eq_(u'Barbara Rosenblat', manifest.metadata['narrator']) eq_(u'Recorded Books, Inc.', manifest.metadata['publisher']) eq_(u'', manifest.metadata['rbdigital:encryptionKey']) eq_(False, manifest.metadata['rbdigital:hasDrm']) eq_(316314528, manifest.metadata['schema:contentSize']) eq_(u'The Ballad of Frankie Silver', manifest.metadata['title']) # We know about 21 items in the reading order. eq_(21, len(manifest.readingOrder)) # Let's spot check one. first = manifest.readingOrder[0] eq_("https://download-piece/1", first['href']) eq_("vnd.librarysimplified/rbdigital-access-document+json", first['type']) eq_("358456", first['rbdigital:id']) eq_(417200, first['schema:contentSize']) eq_("Introduction", first['title']) eq_(69.0, first['duration']) # We can ask for a manifest in which the download `type` and # `href` point to resources on this circulation manager, so # that we perform the request for the real access documents. # This is what we do when fulfilling a request for a manifest. # The other properties should remain the same. fulfillment_proxy = RBDigitalFulfillmentProxy(self._patron(), api=api, for_part=None) proxied_manifest_content = fulfillment_proxy.proxied_manifest(manifest) first_proxied = json.loads(proxied_manifest_content)['readingOrder'][0] downloadUrl = 'https://download-piece/1' # the expected download URL has the API base URL stripped off expected_downloadUrl = downloadUrl[len(api.PRODUCTION_BASE_URL):] expected_proxied_url = '{}/rbdproxy/{}?{}'.format( fulfill_part_url(0), patron_bearer_token, urllib.urlencode({'url': expected_downloadUrl}) ) eq_(expected_proxied_url, first_proxied['href']) eq_("vnd.librarysimplified/rbdigital-access-document+json", first_proxied['type']) eq_("358456", first_proxied['rbdigital:id']) eq_(417200, first_proxied['schema:contentSize']) eq_("Introduction", first_proxied['title']) eq_(69.0, first_proxied['duration']) # An alternate link and a cover link were imported. 
alternate, cover = manifest.links eq_("alternate", alternate['rel']) eq_("https://download/full-book.zip", alternate['href']) eq_("application/zip", alternate['type']) eq_("cover", cover['rel']) assert "image_512x512" in cover['href'] eq_("image/png", cover['type']) def test_best_cover(self): m = AudiobookManifest.best_cover # If there are no covers, or no URLs, None is returned. eq_(None, m(None)) eq_(None, m([])) eq_(None, m([{'nonsense': 'value'}])) eq_(None, m([{'name': 'xx-large'}])) eq_(None, m([{'url': 'somewhere'}])) # No image with a name other than 'large', 'x-large', or # 'xx-large' will be accepted. eq_(None, m([{'name': 'xx-small', 'url': 'foo'}])) # Of those, the largest sized image will be used. eq_('yep', m([ {'name': 'small', 'url': 'no way'}, {'name': 'large', 'url': 'nope'}, {'name': 'x-large', 'url': 'still nope'}, {'name': 'xx-large', 'url': 'yep'}, ])) class TestRBDigitalRepresentationExtractor(RBDigitalAPITest): def test_book_info_with_metadata(self): # Tests that can convert a RBDigital json block into a Metadata object. datastr, datadict = self.api.get_data("response_isbn_found_1.json") metadata = RBDigitalRepresentationExtractor.isbn_info_to_metadata(datadict) eq_("Tea Time for the Traditionally Built", metadata.title) eq_(None, metadata.sort_title) eq_(None, metadata.subtitle) eq_(Edition.BOOK_MEDIUM, metadata.medium) eq_("No. 1 Ladies Detective Agency", metadata.series) eq_(10, metadata.series_position) eq_("eng", metadata.language) eq_("Anchor", metadata.publisher) eq_(None, metadata.imprint) eq_(2013, metadata.published.year) eq_(12, metadata.published.month) eq_(27, metadata.published.day) [author1, author2, narrator] = metadata.contributors eq_(u"Mccall Smith, Alexander", author1.sort_name) eq_(u"Alexander Mccall Smith", author1.display_name) eq_([Contributor.AUTHOR_ROLE], author1.roles) eq_(u"Wilder, Thornton", author2.sort_name) eq_(u"Thornton Wilder", author2.display_name) eq_([Contributor.AUTHOR_ROLE], author2.roles) eq_(u"Guskin, Laura Flanagan", narrator.sort_name) eq_(u"Laura Flanagan Guskin", narrator.display_name) eq_([Contributor.NARRATOR_ROLE], narrator.roles) subjects = sorted(metadata.subjects, key=lambda x: x.identifier) weight = Classification.TRUSTED_DISTRIBUTOR_WEIGHT eq_([(None, u"FICTION / Humorous / General", Subject.BISAC, weight), (u'adult', None, Classifier.RBDIGITAL_AUDIENCE, weight), (u'humorous-fiction', None, Subject.RBDIGITAL, weight), (u'mystery', None, Subject.RBDIGITAL, weight), (u'womens-fiction', None, Subject.RBDIGITAL, weight) ], [(x.identifier, x.name, x.type, x.weight) for x in subjects] ) # Related IDs. eq_((Identifier.RB_DIGITAL_ID, '9780307378101'), (metadata.primary_identifier.type, metadata.primary_identifier.identifier)) ids = [(x.type, x.identifier) for x in metadata.identifiers] # We made exactly one RBDigital and one ISBN-type identifiers. eq_( [(Identifier.ISBN, "9780307378101"), (Identifier.RB_DIGITAL_ID, "9780307378101")], sorted(ids) ) # Available formats. [epub] = sorted(metadata.circulation.formats, key=lambda x: x.content_type) eq_(Representation.EPUB_MEDIA_TYPE, epub.content_type) eq_(DeliveryMechanism.ADOBE_DRM, epub.drm_scheme) # Links to various resources. shortd, image = sorted( metadata.links, key=lambda x:x.rel ) eq_(Hyperlink.SHORT_DESCRIPTION, shortd.rel) assert shortd.content.startswith("THE NO. 
1 LADIES' DETECTIVE AGENCY") eq_(Hyperlink.IMAGE, image.rel) eq_('http://images.oneclickdigital.com/EB00148140/EB00148140_image_128x192.jpg', image.href) thumbnail = image.thumbnail eq_(Hyperlink.THUMBNAIL_IMAGE, thumbnail.rel) eq_('http://images.oneclickdigital.com/EB00148140/EB00148140_image_95x140.jpg', thumbnail.href) # Note: For now, no measurements associated with the book. # Request only the bibliographic information. metadata = RBDigitalRepresentationExtractor.isbn_info_to_metadata(datadict, include_bibliographic=True, include_formats=False) eq_("Tea Time for the Traditionally Built", metadata.title) eq_(None, metadata.circulation) # Request only the format information. metadata = RBDigitalRepresentationExtractor.isbn_info_to_metadata(datadict, include_bibliographic=False, include_formats=True) eq_(None, metadata.title) [epub] = sorted(metadata.circulation.formats, key=lambda x: x.content_type) eq_(Representation.EPUB_MEDIA_TYPE, epub.content_type) eq_(DeliveryMechanism.ADOBE_DRM, epub.drm_scheme) def test_book_info_metadata_no_series(self): """'Default Blank' is not a series -- it's a string representing the absence of a series. """ datastr, datadict = self.api.get_data("response_isbn_found_no_series.json") metadata = RBDigitalRepresentationExtractor.isbn_info_to_metadata(datadict) eq_("Tea Time for the Traditionally Built", metadata.title) eq_(None, metadata.series) eq_(None, metadata.series_position) class TestRBDigitalBibliographicCoverageProvider(RBDigitalAPITest): """Test the code that looks up bibliographic information from RBDigital.""" def setup(self): super(TestRBDigitalBibliographicCoverageProvider, self).setup() self.provider = RBDigitalBibliographicCoverageProvider( self.collection, api_class=MockRBDigitalAPI, api_class_kwargs=dict(base_path=os.path.split(__file__)[0]) ) self.api = self.provider.api def test_script_instantiation(self): """Test that RunCoverageProviderScript can instantiate the coverage provider. """ script = RunCollectionCoverageProviderScript( RBDigitalBibliographicCoverageProvider, self._db, api_class=MockRBDigitalAPI ) [provider] = script.providers assert isinstance(provider, RBDigitalBibliographicCoverageProvider) assert isinstance(provider.api, MockRBDigitalAPI) eq_(self.collection, provider.collection) def test_invalid_or_unrecognized_guid(self): # A bad or malformed ISBN can't get coverage. identifier = self._identifier() identifier.identifier = 'ISBNbadbad' datastr, datadict = self.api.get_data("response_isbn_notfound_1.json") self.api.queue_response(status_code=200, content=datastr) failure = self.provider.process_item(identifier) assert isinstance(failure, CoverageFailure) eq_(True, failure.transient) assert failure.exception.startswith('Cannot find RBDigital metadata') def test_process_item_creates_presentation_ready_work(self): # Test the normal workflow where we ask RBDigital for data, # RBDigital provides it, and we create a presentation-ready work. datastr, datadict = self.api.get_data("response_isbn_found_1.json") self.api.queue_response(200, content=datastr) # Here's the book mentioned in response_isbn_found_1. identifier = self._identifier(identifier_type=Identifier.RB_DIGITAL_ID) identifier.identifier = '9780307378101' # This book has no LicensePool. eq_([], identifier.licensed_through) # Run it through the RBDigitalBibliographicCoverageProvider result = self.provider.process_item(identifier) eq_(identifier, result) # A LicensePool was created. 
But we do NOT know how many copies of this # book are available, only what formats it's available in. [pool] = identifier.licensed_through eq_(0, pool.licenses_owned) [lpdm] = pool.delivery_mechanisms eq_('application/epub+zip (application/vnd.adobe.adept+xml)', lpdm.delivery_mechanism.name) # A Work was created and made presentation ready. eq_('Tea Time for the Traditionally Built', pool.work.title) eq_(True, pool.work.presentation_ready) class TestRBDigitalSyncMonitor(RBDigitalAPITest): """Test the superclass of most of the RBDigital monitors.""" def setup(self): super(TestRBDigitalSyncMonitor, self).setup() self.base_path = os.path.split(__file__)[0] self.collection = MockRBDigitalAPI.mock_collection(self._db) def test_run_once(self): # Calling run_once calls invoke(), and invoke() is # expected to return two numbers. class Mock(RBDigitalSyncMonitor): SERVICE_NAME = "A service" def invoke(self): self.invoked = True return 10, 5 monitor = Mock( self._db, self.collection, api_class=MockRBDigitalAPI, ) progress = monitor.run_once(monitor.timestamp().to_data()) # invoke() was called. eq_(True, monitor.invoked) # The TimestampData returned by run_once() describes its # achievements. eq_( "Records received from vendor: 10. Records written to database: 5", progress.achievements ) # The TimestampData does not include any timing information -- # that will be applied by run(). eq_(None, progress.start) eq_(None, progress.finish) class TestRBDigitalImportMonitor(RBDigitalAPITest): def test_invoke(self): class MockAPI(RBDigitalAPI): def __init__(self): self.called = False def populate_all_catalog(self): self.called = True api = MockAPI() monitor = RBDigitalImportMonitor( self._db, self.collection, api_class=api ) timestamp = monitor.timestamp() eq_(None, timestamp.counter) monitor.invoke() # This monitor is for performing the initial import, and it # can only be invoked once. eq_(True, api.called) eq_(1, timestamp.counter) # Invoking the monitor a second time will do nothing. api.called = False result = monitor.invoke() eq_((0, 0), result) eq_(False, api.called) class TestRBDigitalDeltaMonitor(RBDigitalAPITest): def test_invoke(self): # This monitor calls RBDigitalAPI.populate_delta() when # invoked. class MockAPI(RBDigitalAPI): def __init__(self): self.called = False def populate_delta(self): self.called = True api = MockAPI() monitor = RBDigitalDeltaMonitor( self._db, self.collection, api_class=api ) monitor.invoke() eq_(True, api.called) # NB: These tests would normally be distributed into other test files # (e.g., `test_controller.py`), but because RBdigital is being phased # out, I have chosen to capture them here, in order to make the code # clean up easier when the time soon comes. 
class TestRBDProxyRoutes(RouteTest): CONTROLLER_NAME = "rbdproxy" def test_rbdproxy_bearer(self): url = '/works/<license_pool_id>/fulfill/<mechanism_id>/<part>/rbdproxy/<bearer>' self.assert_request_calls( url, self.controller.proxy, '<bearer>' ) class TestRBDProxyController(ControllerTest): def test_proxy(self): patron = self.default_patron collection = MockRBDigitalAPI.mock_collection(self._db) downloadUrl = 'unprefixed/download/url' valid_bearer_token = 'valid_bearer_token' invalid_bearer_token = 'invalid_bearer_token' class MockAPI(MockRBDigitalAPI): PRODUCTION_BASE_URL = 'https://my_base_url/' @staticmethod def get_credential_by_token(_db, data_source, credential_type, token): # In normal operation, we would lookup the credential by its token # to ensure that it is authorized and so we can instantiate a new # RBDigitalAPI. Here we construct and return a fake credential with # the collection we need to instantiate an RBDigitalAPI instance. # But we only do this if our token is valid. if token != valid_bearer_token: return None credential = Credential.lookup(_db, data_source, credential_type, patron, None, allow_persistent_token=True, collection=collection, allow_empty_token=True) credential.credential = token credential.expires = datetime.datetime.utcnow()+datetime.timedelta(minutes=30) return credential def patron_fulfillment_request(self, patron, url, reauthorize=None): class Response: def __init__(self, **kwargs): self.__dict__.update(kwargs) response = Response(**dict( content=json.dumps({"request_url": url, "reauthorize": reauthorize}), status_code=200, headers={'Content-Type': 'application/json'}, )) return response # No URL parameter, but valid token with self.app.test_request_context("/"): response = self.app.manager.rbdproxy.proxy(valid_bearer_token) eq_(400, response.status_code) # No token, but valid URL parameter with self.app.test_request_context('/?url={}'.format(downloadUrl)): response = self.app.manager.rbdproxy.proxy(invalid_bearer_token) assert len(downloadUrl) > 0 eq_(403, response.status_code) # Valid URL and valid token. We need our mock api for this one. with self.app.test_request_context('/?url={}'.format(downloadUrl)): response = self.app.manager.rbdproxy.proxy(valid_bearer_token, api_class=MockAPI) expected_url = '{}{}'.format(MockAPI.PRODUCTION_BASE_URL, downloadUrl) assert len(downloadUrl) > 0 assert len(MockAPI.PRODUCTION_BASE_URL) > 0 eq_(200, response.status_code) # We should prepend the downloadUrl with the API prefix. eq_(expected_url, response.json.get('request_url')) # We should not allow token reauthorization when proxying. eq_(False, response.json.get('reauthorize'))
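The proxy tests above pin down a small decision protocol: a missing `url` query parameter yields 400, an unrecognized bearer token yields 403, and a valid token plus URL is proxied with the API's production base URL prepended (with token reauthorization disabled). A minimal Python sketch of that control flow, where `proxy_decision`, `lookup_credential`, and `base_url_for` are hypothetical stand-ins for the controller's real machinery:

def proxy_decision(bearer_token, url_param, lookup_credential, base_url_for):
    # Mirrors the three cases exercised by TestRBDProxyController.test_proxy.
    if not url_param:
        return 400, None                      # nothing to proxy
    credential = lookup_credential(bearer_token)
    if credential is None:
        return 403, None                      # token not authorized
    # Prepend the vendor base URL; never allow token reauthorization here.
    return 200, {"request_url": base_url_for(credential) + url_param,
                 "reauthorize": False}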
export { default } from 'global-admin/components/cloud-credential-azure/component';
const cinemaNames = [ "Cinema One", "Cinema Two", "Cinema Three", ]; export default cinemaNames;
ace.define("ace/mode/xml_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(acequire, exports, module) { "use strict"; var oop = acequire("../lib/oop"); var TextHighlightRules = acequire("./text_highlight_rules").TextHighlightRules; var XmlHighlightRules = function(normalize) { var tagRegex = "[a-zA-Z][-_a-zA-Z0-9]*"; this.$rules = { start : [ {token : "string.cdata.xml", regex : "<\\!\\[CDATA\\[", next : "cdata"}, { token : ["punctuation.xml-decl.xml", "keyword.xml-decl.xml"], regex : "(<\\?)(xml)(?=[\\s])", next : "xml_decl", caseInsensitive: true }, { token : ["punctuation.instruction.xml", "keyword.instruction.xml"], regex : "(<\\?)(" + tagRegex + ")", next : "processing_instruction", }, {token : "comment.xml", regex : "<\\!--", next : "comment"}, { token : ["xml-pe.doctype.xml", "xml-pe.doctype.xml"], regex : "(<\\!)(DOCTYPE)(?=[\\s])", next : "doctype", caseInsensitive: true }, {include : "tag"}, {token : "text.end-tag-open.xml", regex: "</"}, {token : "text.tag-open.xml", regex: "<"}, {include : "reference"}, {defaultToken : "text.xml"} ], xml_decl : [{ token : "entity.other.attribute-name.decl-attribute-name.xml", regex : "(?:" + tagRegex + ":)?" + tagRegex + "" }, { token : "keyword.operator.decl-attribute-equals.xml", regex : "=" }, { include: "whitespace" }, { include: "string" }, { token : "punctuation.xml-decl.xml", regex : "\\?>", next : "start" }], processing_instruction : [ {token : "punctuation.instruction.xml", regex : "\\?>", next : "start"}, {defaultToken : "instruction.xml"} ], doctype : [ {include : "whitespace"}, {include : "string"}, {token : "xml-pe.doctype.xml", regex : ">", next : "start"}, {token : "xml-pe.xml", regex : "[-_a-zA-Z0-9:]+"}, {token : "punctuation.int-subset", regex : "\\[", push : "int_subset"} ], int_subset : [{ token : "text.xml", regex : "\\s+" }, { token: "punctuation.int-subset.xml", regex: "]", next: "pop" }, { token : ["punctuation.markup-decl.xml", "keyword.markup-decl.xml"], regex : "(<\\!)(" + tagRegex + ")", push : [{ token : "text", regex : "\\s+" }, { token : "punctuation.markup-decl.xml", regex : ">", next : "pop" }, {include : "string"}] }], cdata : [ {token : "string.cdata.xml", regex : "\\]\\]>", next : "start"}, {token : "text.xml", regex : "\\s+"}, {token : "text.xml", regex : "(?:[^\\]]|\\](?!\\]>))+"} ], comment : [ {token : "comment.xml", regex : "-->", next : "start"}, {defaultToken : "comment.xml"} ], reference : [{ token : "constant.language.escape.reference.xml", regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)" }], attr_reference : [{ token : "constant.language.escape.reference.attribute-value.xml", regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)" }], tag : [{ token : ["meta.tag.punctuation.tag-open.xml", "meta.tag.punctuation.end-tag-open.xml", "meta.tag.tag-name.xml"], regex : "(?:(<)|(</))((?:" + tagRegex + ":)?" 
+ tagRegex + ")", next: [ {include : "attributes"}, {token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : "start"} ] }], tag_whitespace : [ {token : "text.tag-whitespace.xml", regex : "\\s+"} ], whitespace : [ {token : "text.whitespace.xml", regex : "\\s+"} ], string: [{ token : "string.xml", regex : "'", push : [ {token : "string.xml", regex: "'", next: "pop"}, {defaultToken : "string.xml"} ] }, { token : "string.xml", regex : '"', push : [ {token : "string.xml", regex: '"', next: "pop"}, {defaultToken : "string.xml"} ] }], attributes: [{ token : "entity.other.attribute-name.xml", regex : "(?:" + tagRegex + ":)?" + tagRegex + "" }, { token : "keyword.operator.attribute-equals.xml", regex : "=" }, { include: "tag_whitespace" }, { include: "attribute_value" }], attribute_value: [{ token : "string.attribute-value.xml", regex : "'", push : [ {token : "string.attribute-value.xml", regex: "'", next: "pop"}, {include : "attr_reference"}, {defaultToken : "string.attribute-value.xml"} ] }, { token : "string.attribute-value.xml", regex : '"', push : [ {token : "string.attribute-value.xml", regex: '"', next: "pop"}, {include : "attr_reference"}, {defaultToken : "string.attribute-value.xml"} ] }] }; if (this.constructor === XmlHighlightRules) this.normalizeRules(); }; (function() { this.embedTagRules = function(HighlightRules, prefix, tag){ this.$rules.tag.unshift({ token : ["meta.tag.punctuation.tag-open.xml", "meta.tag." + tag + ".tag-name.xml"], regex : "(<)(" + tag + "(?=\\s|>|$))", next: [ {include : "attributes"}, {token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : prefix + "start"} ] }); this.$rules[tag + "-end"] = [ {include : "attributes"}, {token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next: "start", onMatch : function(value, currentState, stack) { stack.splice(0); return this.token; }} ] this.embedRules(HighlightRules, prefix, [{ token: ["meta.tag.punctuation.end-tag-open.xml", "meta.tag." 
+ tag + ".tag-name.xml"], regex : "(</)(" + tag + "(?=\\s|>|$))", next: tag + "-end" }, { token: "string.cdata.xml", regex : "<\\!\\[CDATA\\[" }, { token: "string.cdata.xml", regex : "\\]\\]>" }]); }; }).call(TextHighlightRules.prototype); oop.inherits(XmlHighlightRules, TextHighlightRules); exports.XmlHighlightRules = XmlHighlightRules; }); ace.define("ace/mode/behaviour/xml",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator","ace/lib/lang"], function(acequire, exports, module) { "use strict"; var oop = acequire("../../lib/oop"); var Behaviour = acequire("../behaviour").Behaviour; var TokenIterator = acequire("../../token_iterator").TokenIterator; var lang = acequire("../../lib/lang"); function is(token, type) { return token.type.lastIndexOf(type + ".xml") > -1; } var XmlBehaviour = function () { this.add("string_dquotes", "insertion", function (state, action, editor, session, text) { if (text == '"' || text == "'") { var quote = text; var selected = session.doc.getTextRange(editor.getSelectionRange()); if (selected !== "" && selected !== "'" && selected != '"' && editor.getWrapBehavioursEnabled()) { return { text: quote + selected + quote, selection: false }; } var cursor = editor.getCursorPosition(); var line = session.doc.getLine(cursor.row); var rightChar = line.substring(cursor.column, cursor.column + 1); var iterator = new TokenIterator(session, cursor.row, cursor.column); var token = iterator.getCurrentToken(); if (rightChar == quote && (is(token, "attribute-value") || is(token, "string"))) { return { text: "", selection: [1, 1] }; } if (!token) token = iterator.stepBackward(); if (!token) return; while (is(token, "tag-whitespace") || is(token, "whitespace")) { token = iterator.stepBackward(); } var rightSpace = !rightChar || rightChar.match(/\s/); if (is(token, "attribute-equals") && (rightSpace || rightChar == '>') || (is(token, "decl-attribute-equals") && (rightSpace || rightChar == '?'))) { return { text: quote + quote, selection: [1, 1] }; } } }); this.add("string_dquotes", "deletion", function(state, action, editor, session, range) { var selected = session.doc.getTextRange(range); if (!range.isMultiLine() && (selected == '"' || selected == "'")) { var line = session.doc.getLine(range.start.row); var rightChar = line.substring(range.start.column + 1, range.start.column + 2); if (rightChar == selected) { range.end.column++; return range; } } }); this.add("autoclosing", "insertion", function (state, action, editor, session, text) { if (text == '>') { var position = editor.getCursorPosition(); var iterator = new TokenIterator(session, position.row, position.column); var token = iterator.getCurrentToken() || iterator.stepBackward(); if (!token || !(is(token, "tag-name") || is(token, "tag-whitespace") || is(token, "attribute-name") || is(token, "attribute-equals") || is(token, "attribute-value"))) return; if (is(token, "reference.attribute-value")) return; if (is(token, "attribute-value")) { var firstChar = token.value.charAt(0); if (firstChar == '"' || firstChar == "'") { var lastChar = token.value.charAt(token.value.length - 1); var tokenEnd = iterator.getCurrentTokenColumn() + token.value.length; if (tokenEnd > position.column || tokenEnd == position.column && firstChar != lastChar) return; } } while (!is(token, "tag-name")) { token = iterator.stepBackward(); } var tokenRow = iterator.getCurrentTokenRow(); var tokenColumn = iterator.getCurrentTokenColumn(); if (is(iterator.stepBackward(), "end-tag-open")) return; var element = token.value; if 
(tokenRow == position.row) element = element.substring(0, position.column - tokenColumn); if (this.voidElements.hasOwnProperty(element.toLowerCase())) return; return { text: ">" + "</" + element + ">", selection: [1, 1] }; } }); this.add("autoindent", "insertion", function (state, action, editor, session, text) { if (text == "\n") { var cursor = editor.getCursorPosition(); var line = session.getLine(cursor.row); var iterator = new TokenIterator(session, cursor.row, cursor.column); var token = iterator.getCurrentToken(); if (token && token.type.indexOf("tag-close") !== -1) { if (token.value == "/>") return; while (token && token.type.indexOf("tag-name") === -1) { token = iterator.stepBackward(); } if (!token) { return; } var tag = token.value; var row = iterator.getCurrentTokenRow(); token = iterator.stepBackward(); if (!token || token.type.indexOf("end-tag") !== -1) { return; } if (this.voidElements && !this.voidElements[tag]) { var nextToken = session.getTokenAt(cursor.row, cursor.column+1); var line = session.getLine(row); var nextIndent = this.$getIndent(line); var indent = nextIndent + session.getTabString(); if (nextToken && nextToken.value === "</") { return { text: "\n" + indent + "\n" + nextIndent, selection: [1, indent.length, 1, indent.length] }; } else { return { text: "\n" + indent }; } } } } }); }; oop.inherits(XmlBehaviour, Behaviour); exports.XmlBehaviour = XmlBehaviour; }); ace.define("ace/mode/folding/xml",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/range","ace/mode/folding/fold_mode","ace/token_iterator"], function(acequire, exports, module) { "use strict"; var oop = acequire("../../lib/oop"); var lang = acequire("../../lib/lang"); var Range = acequire("../../range").Range; var BaseFoldMode = acequire("./fold_mode").FoldMode; var TokenIterator = acequire("../../token_iterator").TokenIterator; var FoldMode = exports.FoldMode = function(voidElements, optionalEndTags) { BaseFoldMode.call(this); this.voidElements = voidElements || {}; this.optionalEndTags = oop.mixin({}, this.voidElements); if (optionalEndTags) oop.mixin(this.optionalEndTags, optionalEndTags); }; oop.inherits(FoldMode, BaseFoldMode); var Tag = function() { this.tagName = ""; this.closing = false; this.selfClosing = false; this.start = {row: 0, column: 0}; this.end = {row: 0, column: 0}; }; function is(token, type) { return token.type.lastIndexOf(type + ".xml") > -1; } (function() { this.getFoldWidget = function(session, foldStyle, row) { var tag = this._getFirstTagInLine(session, row); if (!tag) return ""; if (tag.closing || (!tag.tagName && tag.selfClosing)) return foldStyle == "markbeginend" ? 
"end" : ""; if (!tag.tagName || tag.selfClosing || this.voidElements.hasOwnProperty(tag.tagName.toLowerCase())) return ""; if (this._findEndTagInLine(session, row, tag.tagName, tag.end.column)) return ""; return "start"; }; this._getFirstTagInLine = function(session, row) { var tokens = session.getTokens(row); var tag = new Tag(); for (var i = 0; i < tokens.length; i++) { var token = tokens[i]; if (is(token, "tag-open")) { tag.end.column = tag.start.column + token.value.length; tag.closing = is(token, "end-tag-open"); token = tokens[++i]; if (!token) return null; tag.tagName = token.value; tag.end.column += token.value.length; for (i++; i < tokens.length; i++) { token = tokens[i]; tag.end.column += token.value.length; if (is(token, "tag-close")) { tag.selfClosing = token.value == '/>'; break; } } return tag; } else if (is(token, "tag-close")) { tag.selfClosing = token.value == '/>'; return tag; } tag.start.column += token.value.length; } return null; }; this._findEndTagInLine = function(session, row, tagName, startColumn) { var tokens = session.getTokens(row); var column = 0; for (var i = 0; i < tokens.length; i++) { var token = tokens[i]; column += token.value.length; if (column < startColumn) continue; if (is(token, "end-tag-open")) { token = tokens[i + 1]; if (token && token.value == tagName) return true; } } return false; }; this._readTagForward = function(iterator) { var token = iterator.getCurrentToken(); if (!token) return null; var tag = new Tag(); do { if (is(token, "tag-open")) { tag.closing = is(token, "end-tag-open"); tag.start.row = iterator.getCurrentTokenRow(); tag.start.column = iterator.getCurrentTokenColumn(); } else if (is(token, "tag-name")) { tag.tagName = token.value; } else if (is(token, "tag-close")) { tag.selfClosing = token.value == "/>"; tag.end.row = iterator.getCurrentTokenRow(); tag.end.column = iterator.getCurrentTokenColumn() + token.value.length; iterator.stepForward(); return tag; } } while(token = iterator.stepForward()); return null; }; this._readTagBackward = function(iterator) { var token = iterator.getCurrentToken(); if (!token) return null; var tag = new Tag(); do { if (is(token, "tag-open")) { tag.closing = is(token, "end-tag-open"); tag.start.row = iterator.getCurrentTokenRow(); tag.start.column = iterator.getCurrentTokenColumn(); iterator.stepBackward(); return tag; } else if (is(token, "tag-name")) { tag.tagName = token.value; } else if (is(token, "tag-close")) { tag.selfClosing = token.value == "/>"; tag.end.row = iterator.getCurrentTokenRow(); tag.end.column = iterator.getCurrentTokenColumn() + token.value.length; } } while(token = iterator.stepBackward()); return null; }; this._pop = function(stack, tag) { while (stack.length) { var top = stack[stack.length-1]; if (!tag || top.tagName == tag.tagName) { return stack.pop(); } else if (this.optionalEndTags.hasOwnProperty(top.tagName)) { stack.pop(); continue; } else { return null; } } }; this.getFoldWidgetRange = function(session, foldStyle, row) { var firstTag = this._getFirstTagInLine(session, row); if (!firstTag) return null; var isBackward = firstTag.closing || firstTag.selfClosing; var stack = []; var tag; if (!isBackward) { var iterator = new TokenIterator(session, row, firstTag.start.column); var start = { row: row, column: firstTag.start.column + firstTag.tagName.length + 2 }; if (firstTag.start.row == firstTag.end.row) start.column = firstTag.end.column; while (tag = this._readTagForward(iterator)) { if (tag.selfClosing) { if (!stack.length) { tag.start.column += tag.tagName.length + 2; 
tag.end.column -= 2; return Range.fromPoints(tag.start, tag.end); } else continue; } if (tag.closing) { this._pop(stack, tag); if (stack.length == 0) return Range.fromPoints(start, tag.start); } else { stack.push(tag); } } } else { var iterator = new TokenIterator(session, row, firstTag.end.column); var end = { row: row, column: firstTag.start.column }; while (tag = this._readTagBackward(iterator)) { if (tag.selfClosing) { if (!stack.length) { tag.start.column += tag.tagName.length + 2; tag.end.column -= 2; return Range.fromPoints(tag.start, tag.end); } else continue; } if (!tag.closing) { this._pop(stack, tag); if (stack.length == 0) { tag.start.column += tag.tagName.length + 2; if (tag.start.row == tag.end.row && tag.start.column < tag.end.column) tag.start.column = tag.end.column; return Range.fromPoints(tag.start, end); } } else { stack.push(tag); } } } }; }).call(FoldMode.prototype); }); ace.define("ace/mode/xml",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/text","ace/mode/xml_highlight_rules","ace/mode/behaviour/xml","ace/mode/folding/xml","ace/worker/worker_client"], function(acequire, exports, module) { "use strict"; var oop = acequire("../lib/oop"); var lang = acequire("../lib/lang"); var TextMode = acequire("./text").Mode; var XmlHighlightRules = acequire("./xml_highlight_rules").XmlHighlightRules; var XmlBehaviour = acequire("./behaviour/xml").XmlBehaviour; var XmlFoldMode = acequire("./folding/xml").FoldMode; var WorkerClient = acequire("../worker/worker_client").WorkerClient; var Mode = function() { this.HighlightRules = XmlHighlightRules; this.$behaviour = new XmlBehaviour(); this.foldingRules = new XmlFoldMode(); }; oop.inherits(Mode, TextMode); (function() { this.voidElements = lang.arrayToMap([]); this.blockComment = {start: "<!--", end: "-->"}; this.createWorker = function(session) { var worker = new WorkerClient(["ace"], "ace/mode/xml_worker", "Worker"); worker.attachToDocument(session.getDocument()); worker.on("error", function(e) { session.setAnnotations(e.data); }); worker.on("terminate", function() { session.clearAnnotations(); }); return worker; }; this.$id = "ace/mode/xml"; }).call(Mode.prototype); exports.Mode = Mode; });
const Command = require('./Command');
const request = require("request");

module.exports = class CatCommand extends Command {
    execute(message) {
        super.execute(message, Command.CAT());
        Command.logDebug("requesting http://aws.random.cat/meow");
        request("http://aws.random.cat/meow", function (error, response, body) {
            // Check the transport error before touching the body:
            // JSON.parse would throw on an undefined or non-JSON body.
            if (error) {
                Command.logWarn(`request failed ${error}`);
                return;
            }
            let json;
            try {
                json = JSON.parse(body);
            } catch (parseError) {
                Command.logWarn(`request returned invalid JSON: ${parseError}`);
                return;
            }
            if (!json.file) {
                Command.logWarn("request failed: no file in response");
                return;
            }
            Command.logDebug(`request succeeded ${JSON.stringify(json)}`);
            message.channel.send({
                embed: {
                    title: "So Cute :heart_eyes: ",
                    color: 3447003,
                    image: { url: json.file },
                    timestamp: new Date()
                }
            });
        });
    }
}
import React from 'react' import auth from '../../src' import { Client } from '../../src/ipc' import { ipcStorage, getData } from '../../src/storage' import './IdpSelect.css' export default class IdpSelect extends React.Component { state = { idp: '', error: null } handleChangeIdp = (event) => { let idp = event.target.value // Auto-prepend https: if the user is not typing it if (!/^($|h$|ht)/.test(idp)) idp = `https://${idp}` this.setState({ idp }) } handleBlurIdp = (event) => { let idp = event.target.value // Auto-prepend https: if not present if (!/^(https?:\/\/|$)/.test(idp)) idp = idp.replace(/^([a-z]*:\/*)?/, 'https://') this.setState({ idp }) } handleSelectIdp = (idp) => async (event) => { event.preventDefault() this.setState({ idp }) if (!window.opener) { console.warn('No parent window') this.setState({ error: "Couldn't find the application window. " + 'Try closing this popup window and logging in again.', }) return } const loginOptions = { ...(await this.getClient().request('getLoginOptions')), storage: this.getStorage(), } await auth.login(idp, loginOptions) } getClient() { return new Client(window.opener, this.props.appOrigin) } getStorage() { return ipcStorage(this.getClient()) } async componentDidMount() { const { rpConfig } = await getData(this.getStorage()) if (rpConfig) { this.setState({ idp: rpConfig.provider.url }) } this.idpInput.focus() } render() { const { appName, idps } = this.props const { idp, error } = this.state return ( <div> <h1> Log in to <span className="app-name">{appName}</span> </h1> {error && <p className="error">{error}</p>} <p>Please enter your WebID or the URL of your identity provider:</p> <form className="custom-idp" onSubmit={this.handleSelectIdp(idp)}> <input ref={(input) => (this.idpInput = input)} type="url" placeholder="https://my-identity.provider" value={idp} onChange={this.handleChangeIdp} onBlur={this.handleBlurIdp} /> <button type="submit" disabled={!idp}> Go </button> </form> <p>Or pick an identity provider from the list below:</p> <div className="idp-list"> {idps.map((idp) => ( <button className="idp" onClick={this.handleSelectIdp(idp.url)} key={idp.url} > {idp.displayName} </button> ))} </div> </div> ) } }
// router/appPerspectives.js - application perspectives import Vue from 'vue' import * as _const from '@/store/_constants' import { appMenu, menuRoute } from '@/router/appMenu' /** appPerspectives defines all application perspectives. Note: - reusing perspective menu items in app menu - appPerspectives is simply the reference for all available perspectives - store.state.perspectives maps to the app UI tabs **/ const appPerspectives = { ...appMenu.perspectives.menuItems } /** buildTabs constructs perspective tabs, with additional properties. { key, // the tab key for binding :value by UI component (Tabs). visible // boolean, indicating if the tab is opened or closed. } Note: - the key is initially the same as alias or name in app perspectives - for multiple tabs, the key for the new tab will be generated - see openAppTab function **/ export const buildAppTabs = () => { let i = 0 let appTabs = [] for (let key in appPerspectives) { let p = Object.assign({}, appPerspectives[key]) p.key = key p.title = p.alias || p.name p.tabIndex = i++ p.visible = true appTabs.push(p) } // console.log('built appTabs:', JSON.stringify(appTabs)) return appTabs } /** checkAppTabs checks perspective tabs. Note: - if all tabs are closed, redirect to a default page (e.g. 'About'). **/ export const checkAppTabs = (store, router) => { let togo = appMenu.about let tabs = store.state.perspectives // console.log('current tabs:', JSON.stringify(store.state.perspectives, null, 2)) for (let tab of tabs) { if (tab.visible) return true } menuRoute(togo, router, store) } /** checkMultiTab checks if allowing adding extra tab for the same perspective. **/ export const checkMultiTab = (tabKey, store) => { let tabs = store.state.perspectives let tab = tabs.find(e => e.key === tabKey) if (tab && tab.allowMulti) { let filteredTabs = tabs.filter(e => e.name === tab.name && e.visible) return filteredTabs.length < _const.PERSPECTIVES_LIMIT } return false } // getPerspective looks up a perspective by route path export const getPerspective = (path) => { for (let key in appPerspectives) { let p = appPerspectives[key] // console.log('getPerspective: comparing', path, JSON.stringify(p)) if (p.route === path) { return { key: key, ...p } } } // console.log('no perspective for path:', path) return null } // moveBackward swaps a tab by name with its previous item export const moveBackward = (name, tabs) => { let tidx = tabs.findIndex(e => e.key === name) let prev = tidx > 0 ? (tidx - 1) % tabs.length : tabs.length - 1 if (tabs.length > tidx && tidx >= 0) { let prevItem = tabs[prev] let nameItem = tabs[tidx] Vue.set(tabs, tidx, {...prevItem, tabIndex: tidx}) Vue.set(tabs, prev, {...nameItem, tabIndex: prev}) } for (let i = 0; i < tabs.length; i++) { tabs[i].tabIndex = i } } // moveForward swaps a tab by name with its next item export const moveForward = (name, tabs) => { let tidx = tabs.findIndex(e => e.key === name) let next = (tidx + 1) % tabs.length if (tabs.length > tidx && tidx >= 0) { let nextItem = tabs[next] let nameItem = tabs[tidx] Vue.set(tabs, tidx, {...nextItem, tabIndex: tidx}) Vue.set(tabs, next, {...nameItem, tabIndex: next}) } for (let i = 0; i < tabs.length; i++) { tabs[i].tabIndex = i } } // openAppTabByUser sync up active tab state and router history on user action. 
export const openAppTabByUser = (tabKey, store, router) => { let tabs = store.state.perspectives for (let tab of tabs) { if (tab.key === tabKey) { store.commit(_const.ACTIVE_TAB_KEY, tabKey) Vue.set(tab, 'visible', true) menuRoute(tab, router, store) } } } /** openAppTab opens or appends a new tab per the route path. Note: - if a route-associated tab already exists, set visible = true; - if the tab exists and the perspective allows multiple tabs, append a new one; - otherwise, append a new visible tab at the end. **/ export const openAppTab = (path, tabs) => { let activeTab let p = getPerspective(path) // console.log('searching tab:', path, JSON.stringify(p)) if (p) { let activeTabIndex let countsMulti = 0 let tabName = p.name let tabTitle = p.alias || p.name let tabKey = p.key for (let i = 0; i < tabs.length; i++) { // console.log('comparing path in tab:', path, JSON.stringify(tabs[i])) if (tabs[i].route === path) { tabs[i].allowMulti && countsMulti++ if (activeTab && tabs[i].visible) continue // console.log('found tab:', JSON.stringify(tabs[i])) activeTab = tabs[i] activeTabIndex = i } } if (activeTab) { let canAdd = countsMulti > 0 && countsMulti < _const.PERSPECTIVES_LIMIT if (canAdd && activeTab.visible) { let newTab = Object.assign({}, { ...p, name: tabName, key: `${tabKey}-${countsMulti}`, title: `${tabTitle}-[${countsMulti}]`, tabIndex: tabs.length, visible: true }) // console.log('appending new tab:', JSON.stringify(newTab, null, 2)) activeTab = newTab activeTabIndex = tabs.length tabs.push(newTab) } else if (!activeTab.visible) { // NOTE: moving an earlier closed tab to the end (as iView Tabs) tabs.splice(activeTabIndex, 1) tabs.push(activeTab) } // console.log(`tab[${activeTabIndex}]:`, JSON.stringify(activeTab)) activeTab.visible = true } else { activeTab = Object.assign({}, { ...p, key: tabKey, name: tabName, title: tabTitle, tabIndex: tabs.length, visible: true }) // console.log('adding tab:', activeTab.key, JSON.stringify(activeTab)) tabs.push(activeTab) } for (let i = 0; i < tabs.length; i++) { tabs[i].tabIndex = i } } // console.log('updated tabs:', JSON.stringify(tabs, null, 2)) return activeTab } /** openNewTab adds or active a new perspective tab. **/ export const openNewTab = (key, store) => { let tabs = store.state.perspectives let tab = tabs.find(e => e.key === key) // console.log('found tab by key:', key, JSON.stringify(tab, null, 2)) if (tab) { return openAppTab(tab.route, tabs) } // console.log('unable to open new tab by key:', key) return null }
#include "Arduino.h" #include "sensors_lib.h" //CONFIG FOR PNEUMATIC PRESSURE SYSTEM //SENSING: //Arduino ADC pins: //Mega: A0-A15 (all pins work fine) //Uno: A0-A5 (all pins work fine) //Micro: A0, A1, A4 - A8, A11 (A9, A10, A12, A13 all conflict with PWM pins) //Nano: A0-A7 (A4, A5 are i2c pins) //VALVES: //Arduino PWM pins: //Mega: 2 - 13, 44, 45, 46 (all pins work fine) //Uno: 3, 5, 6, 9, 10, 11 (all pins work fine) //Micro: 3, 5, 6, 9, 10, 11, 12, 13 (all pins work fine) //Nano: 3, 6, 9, 10, 11 (pin 5 fails consistently, no idea why) // Use raw USB communication as an HID object - Comment out for Serial communication #define COMMS_USB //Define the type of sensor to use (only one can be true) #define SENSOR_ANALOG true #define SENSOR_I2C false SensorSSCDANN060PGAA5 controlSensorType; SensorDisconnected masterSensorType; #define MASTER_SENSOR false #define ADC_RES 13 #define ADC_MAX_VOLTS 3.3 float ADC_MULT = 0.6666666; #define MAX_NUM_CHANNELS 8 //Define the type of controller to use (only one can be true) #define CONTROL_BANGBANG false #define CONTROL_P false #define CONTROL_PID true //Set default settings for things //If using i2c sensors... int sensorAddr=0x58; bool useMux=false; int muxAddr=0x70; //Set sensor pins int senseChannels[]={A21,A20,A19,A18,A17,A16,A15,A14}; //Set valve pins //int valvePins[][2]= { {6,9}, {10,11} }; int valvePins[][2]= {{23,22}, {21,20}, {2,3}, {4,5}, {6,7}, {8,9}, {10,14}, {29,30} }; int valveOffset[][2]={{227,226},{224,224},{225,225},{225,225}, {225,225},{225,225},{225,225},{225,225}}; //Set Button pins int buttonPins[]={26,27,28}; int robotPins[] ={26,27,28}; int extPins[] ={}; //Default controller settings float pid_start[]={0.1,0.001,0}; float deadzone_start=0.0; float setpoint_start=0; float integratorResetTime_start = -1; float minPressure_start = 0; //[psi] float maxPressure_start = 28; //[psi]
import * as ActionTypes from '../actions/application' import Immutable from "immutable"; export default function applicationState(state, action) { if (!state) { state = Immutable.Map(); } switch(action.type) { case ActionTypes.DISPLAY_PLUGIN_LIST: return state.set("display_plugin_list", true); case ActionTypes.HIDE_PLUGIN_LIST: return state.set("display_plugin_list", false); case ActionTypes.TOGGLE_PLUGIN_LIST: return state.set("display_plugin_list", !state.get("display_plugin_list")); case ActionTypes.INDICATE_CURRENT_ACTION: return state.set("current_action", action.content); default: return state; } }
import dynamic from 'next/dynamic' const DynamicComponentWithCustomLoading = dynamic( () => import('../components/hello'), { loading: () => <p>...</p> } ) const DynamicClientOnlyComponent = dynamic( () => import('../components/hello'), { ssr: false } )
/****************************************************************************** * * Copyright (C) 2014 Google, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ #pragma once #include <gtest/gtest.h> class AllocationTestHarness : public ::testing::Test { protected: virtual void SetUp(); virtual void TearDown(); };
import os
import time

import comedi


class stimulus_pulse(object):
    com = comedi.comedi_open('/dev/comedi0')

    def __init__(self, *args, **kwargs):
        #self.com = comedi.comedi_open('/dev/comedi0')
        super(stimulus_pulse, self).__init__(*args, **kwargs)
        subdevice = 0
        write_mask = 0x800000
        val = 0x000000
        base_channel = 0
        # Start with the stimulus line low.
        comedi.comedi_dio_bitfield2(self.com, subdevice, write_mask, val, base_channel)

    def pulse(self, ts):
        #super(stimulus_pulse, self).pulse()
        subdevice = 0
        write_mask = 0x800000
        base_channel = 0
        # ts is taken to be the elapsed trial time in seconds: hold the
        # stimulus line high for the first 0.4 s, then pull it low.
        if ts < 0.4:
            val = 0x800000
        else:
            val = 0x000000
        comedi.comedi_dio_bitfield2(self.com, subdevice, write_mask, val, base_channel)
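A short usage sketch for the class above, assuming `ts` is the elapsed trial time in seconds; the 1-second trial length and 10 ms polling interval below are illustrative, not part of the original module:

if __name__ == '__main__':
    # Drive one trial: the line is high for the first 0.4 s, low afterwards.
    stim = stimulus_pulse()
    start = time.time()
    while time.time() - start < 1.0:
        stim.pulse(time.time() - start)
        time.sleep(0.01)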
const ColorParse = require('../src/api/ColorParse'); const StreetData = require('../src/api/StreetData'); const YelpData = require('../src/api/YelpData'); const WeatherData = require('../src/api/WeatherData'); const RouteData = require('../src/api/RouteData'); let grid = RouteData.GetPointGrid(47.660273, -122.409887, 1, 0.5); RouteData.GetGraph(grid, 0.7) .then(graph => RouteData.FindNaturePaths(graph)) .then(paths => RouteData.FindTopNaturePaths(paths)) .then(results => { console.log(results); }).catch(err => console.error(err)); // YelpData.ParkSearch(47.660273, -122.409887, 1000).then(d => { // console.log(d); // }); // let points = RouteData.GetRandomPointGrid(-90.548630, 14.616599, 1, 10); // console.log(points); // let points = RouteData.GetPointGrid(-90.548630, 14.616599, 1, 0.1); // console.log(points); // let sunData = WeatherData.GetSunPositionToday(47.6694956, -122.31547389999999); // console.log(sunData); // ColorParse.GetPalette(46.414382, 10.013988, 151.78).then(colors => { // console.log(colors); // }); // ColorParse.GetPaletteNames(46.414382, 10.013988).then(result => { // console.log(result); // }) // ColorParse.GetPaletteAnalysis(47.660259, -122.408417).then(result => { // console.log(result); // })
import asyncio import dataclasses import logging import random import time import traceback from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union import aiosqlite from blspy import AugSchemeMPL import chia.server.ws_connection as ws # lgtm [py/import-and-import-from] from chia.consensus.block_creation import unfinished_block_to_full_block from chia.consensus.block_record import BlockRecord from chia.consensus.blockchain import Blockchain, ReceiveBlockResult from chia.consensus.blockchain_interface import BlockchainInterface from chia.consensus.constants import ConsensusConstants from chia.consensus.cost_calculator import NPCResult from chia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty from chia.consensus.make_sub_epoch_summary import next_sub_epoch_summary from chia.consensus.multiprocess_validation import PreValidationResult from chia.consensus.pot_iterations import calculate_sp_iters from chia.full_node.block_store import BlockStore from chia.full_node.lock_queue import LockQueue, LockClient from chia.full_node.bundle_tools import detect_potential_template_generator from chia.full_node.coin_store import CoinStore from chia.full_node.full_node_store import FullNodeStore, FullNodeStorePeakResult from chia.full_node.hint_store import HintStore from chia.full_node.mempool_manager import MempoolManager from chia.full_node.signage_point import SignagePoint from chia.full_node.sync_store import SyncStore from chia.full_node.weight_proof import WeightProofHandler from chia.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol from chia.protocols.full_node_protocol import ( RequestBlocks, RespondBlock, RespondBlocks, RespondSignagePoint, ) from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.protocols.wallet_protocol import CoinState, CoinStateUpdate from chia.server.node_discovery import FullNodePeers from chia.server.outbound_message import Message, NodeType, make_msg from chia.server.peer_store_resolver import PeerStoreResolver from chia.server.server import ChiaServer from chia.types.blockchain_format.classgroup import ClassgroupElement from chia.types.blockchain_format.pool_target import PoolTarget from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary from chia.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof from chia.types.coin_record import CoinRecord from chia.types.end_of_slot_bundle import EndOfSubSlotBundle from chia.types.full_block import FullBlock from chia.types.generator_types import BlockGenerator from chia.types.header_block import HeaderBlock from chia.types.mempool_inclusion_status import MempoolInclusionStatus from chia.types.spend_bundle import SpendBundle from chia.types.transaction_queue_entry import TransactionQueueEntry from chia.types.unfinished_block import UnfinishedBlock from chia.util import cached_bls from chia.util.bech32m import encode_puzzle_hash from chia.util.check_fork_next_block import check_fork_next_block from chia.util.condition_tools import pkm_pairs from chia.util.config import PEER_DB_PATH_KEY_DEPRECATED from chia.util.db_wrapper import DBWrapper from chia.util.errors import ConsensusError, Err, ValidationError from chia.util.ints import uint8, uint32, uint64, uint128 from chia.util.path import mkdir, path_from_root from chia.util.safe_cancel_task import cancel_task_safe from chia.util.profiler import 
profile_task from datetime import datetime from chia.util.db_synchronous import db_synchronous_on from chia.util.db_version import lookup_db_version class FullNode: block_store: BlockStore full_node_store: FullNodeStore full_node_peers: Optional[FullNodePeers] sync_store: Any coin_store: CoinStore mempool_manager: MempoolManager connection: aiosqlite.Connection _sync_task: Optional[asyncio.Task] _init_weight_proof: Optional[asyncio.Task] = None blockchain: Blockchain config: Dict server: Any log: logging.Logger constants: ConsensusConstants _shut_down: bool root_path: Path state_changed_callback: Optional[Callable] timelord_lock: asyncio.Lock initialized: bool weight_proof_handler: Optional[WeightProofHandler] _ui_tasks: Set[asyncio.Task] _blockchain_lock_queue: LockQueue _blockchain_lock_ultra_priority: LockClient _blockchain_lock_high_priority: LockClient _blockchain_lock_low_priority: LockClient _transaction_queue_task: Optional[asyncio.Task] def __init__( self, config: Dict, root_path: Path, consensus_constants: ConsensusConstants, name: str = None, ): self.initialized = False self.root_path = root_path self.config = config self.server = None self._shut_down = False # Set to true to close all infinite loops self.constants = consensus_constants self.pow_creation: Dict[bytes32, asyncio.Event] = {} self.state_changed_callback: Optional[Callable] = None self.full_node_peers = None self.sync_store = None self.signage_point_times = [time.time() for _ in range(self.constants.NUM_SPS_SUB_SLOT)] self.full_node_store = FullNodeStore(self.constants) self.uncompact_task = None self.compact_vdf_requests: Set[bytes32] = set() self.log = logging.getLogger(name if name else __name__) # Used for metrics self.dropped_tx: Set[bytes32] = set() self.not_dropped_tx = 0 self._ui_tasks = set() db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"]) self.db_path = path_from_root(root_path, db_path_replaced) self.coin_subscriptions: Dict[bytes32, Set[bytes32]] = {} # Puzzle Hash : Set[Peer ID] self.ph_subscriptions: Dict[bytes32, Set[bytes32]] = {} # Puzzle Hash : Set[Peer ID] self.peer_coin_ids: Dict[bytes32, Set[bytes32]] = {} # Peer ID: Set[Coin ids] self.peer_puzzle_hash: Dict[bytes32, Set[bytes32]] = {} # Peer ID: Set[puzzle_hash] self.peer_sub_counter: Dict[bytes32, int] = {} # Peer ID: int (subscription count) mkdir(self.db_path.parent) self._transaction_queue_task = None def _set_state_changed_callback(self, callback: Callable): self.state_changed_callback = callback async def _start(self): self.timelord_lock = asyncio.Lock() self.compact_vdf_sem = asyncio.Semaphore(4) # We don't want to run too many concurrent new_peak instances, because it would fetch the same block from # multiple peers and re-validate. 
self.new_peak_sem = asyncio.Semaphore(2) # These many respond_transaction tasks can be active at any point in time self.respond_transaction_semaphore = asyncio.Semaphore(200) # create the store (db) and full node instance self.connection = await aiosqlite.connect(self.db_path) await self.connection.execute("pragma journal_mode=wal") db_sync = db_synchronous_on(self.config.get("db_sync", "auto"), self.db_path) self.log.info(f"opening blockchain DB: synchronous={db_sync}") await self.connection.execute("pragma synchronous={}".format(db_sync)) if self.config.get("log_sqlite_cmds", False): sql_log_path = path_from_root(self.root_path, "log/sql.log") self.log.info(f"logging SQL commands to {sql_log_path}") def sql_trace_callback(req: str): timestamp = datetime.now().strftime("%H:%M:%S.%f") log = open(sql_log_path, "a") log.write(timestamp + " " + req + "\n") log.close() await self.connection.set_trace_callback(sql_trace_callback) db_version: int = await lookup_db_version(self.connection) self.db_wrapper = DBWrapper(self.connection, db_version=db_version) self.block_store = await BlockStore.create(self.db_wrapper) self.sync_store = await SyncStore.create() self.hint_store = await HintStore.create(self.db_wrapper) self.coin_store = await CoinStore.create(self.db_wrapper) self.log.info("Initializing blockchain from disk") start_time = time.time() reserved_cores = self.config.get("reserved_cores", 0) self.blockchain = await Blockchain.create( self.coin_store, self.block_store, self.constants, self.hint_store, self.db_path.parent, reserved_cores ) self.mempool_manager = MempoolManager(self.coin_store, self.constants) # Blocks are validated under high priority, and transactions under low priority. This guarantees blocks will # be validated first. self._blockchain_lock_queue = LockQueue(self.blockchain.lock) self._blockchain_lock_ultra_priority = LockClient(0, self._blockchain_lock_queue) self._blockchain_lock_high_priority = LockClient(1, self._blockchain_lock_queue) self._blockchain_lock_low_priority = LockClient(2, self._blockchain_lock_queue) # Transactions go into this queue from the server, and get sent to respond_transaction self.transaction_queue = asyncio.PriorityQueue(10000) self._transaction_queue_task = asyncio.create_task(self._handle_transactions()) self.transaction_responses: List[Tuple[bytes32, MempoolInclusionStatus, Optional[Err]]] = [] self.weight_proof_handler = None self._init_weight_proof = asyncio.create_task(self.initialize_weight_proof()) if self.config.get("enable_profiler", False): asyncio.create_task(profile_task(self.root_path, "node", self.log)) self._sync_task = None self._segment_task = None time_taken = time.time() - start_time if self.blockchain.get_peak() is None: self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s") num_unspent = await self.coin_store.num_unspent() if num_unspent > 0: self.log.error( f"Inconsistent blockchain DB file! Could not find peak block but found {num_unspent} coins! " "This is a fatal error. 
The blockchain database may be corrupt" ) raise RuntimeError("corrupt blockchain DB") else: self.log.info( f"Blockchain initialized to peak {self.blockchain.get_peak().header_hash} height" f" {self.blockchain.get_peak().height}, " f"time taken: {int(time_taken)}s" ) async with self._blockchain_lock_high_priority: pending_tx = await self.mempool_manager.new_peak(self.blockchain.get_peak(), []) assert len(pending_tx) == 0 # no pending transactions when starting up peak: Optional[BlockRecord] = self.blockchain.get_peak() if peak is not None: full_peak = await self.blockchain.get_full_peak() mempool_new_peak_result, fns_peak_result = await self.peak_post_processing( full_peak, peak, max(peak.height - 1, 0), None, [] ) await self.peak_post_processing_2( full_peak, peak, max(peak.height - 1, 0), None, ([], {}), mempool_new_peak_result, fns_peak_result ) if self.config["send_uncompact_interval"] != 0: sanitize_weight_proof_only = False if "sanitize_weight_proof_only" in self.config: sanitize_weight_proof_only = self.config["sanitize_weight_proof_only"] assert self.config["target_uncompact_proofs"] != 0 self.uncompact_task = asyncio.create_task( self.broadcast_uncompact_blocks( self.config["send_uncompact_interval"], self.config["target_uncompact_proofs"], sanitize_weight_proof_only, ) ) self.initialized = True if self.full_node_peers is not None: asyncio.create_task(self.full_node_peers.start()) async def _handle_one_transaction(self, entry: TransactionQueueEntry): peer = entry.peer try: inc_status, err = await self.respond_transaction(entry.transaction, entry.spend_name, peer, entry.test) self.transaction_responses.append((entry.spend_name, inc_status, err)) if len(self.transaction_responses) > 50: self.transaction_responses = self.transaction_responses[1:] except asyncio.CancelledError: error_stack = traceback.format_exc() self.log.debug(f"Cancelling _handle_one_transaction, closing: {error_stack}") except Exception: error_stack = traceback.format_exc() self.log.error(f"Error in _handle_one_transaction, closing: {error_stack}") if peer is not None: await peer.close() finally: self.respond_transaction_semaphore.release() async def _handle_transactions(self): try: while not self._shut_down: # We use a semaphore to make sure we don't send more than 200 concurrent calls of respond_transaction. # However doing them one at a time would be slow, because they get sent to other processes. await self.respond_transaction_semaphore.acquire() item: TransactionQueueEntry = (await self.transaction_queue.get())[1] asyncio.create_task(self._handle_one_transaction(item)) except asyncio.CancelledError: raise async def initialize_weight_proof(self): self.weight_proof_handler = WeightProofHandler(self.constants, self.blockchain) peak = self.blockchain.get_peak() if peak is not None: await self.weight_proof_handler.create_sub_epoch_segments() def set_server(self, server: ChiaServer): self.server = server dns_servers = [] try: network_name = self.config["selected_network"] default_port = self.config["network_overrides"]["config"][network_name]["default_full_node_port"] except Exception: self.log.info("Default port field not found in config.") default_port = None if "dns_servers" in self.config: dns_servers = self.config["dns_servers"] elif self.config["port"] == 8444: # If `dns_servers` misses from the `config`, hardcode it if we're running mainnet. 
dns_servers.append("dns-introducer.chia.net") try: self.full_node_peers = FullNodePeers( self.server, self.config["target_peer_count"] - self.config["target_outbound_peer_count"], self.config["target_outbound_peer_count"], PeerStoreResolver( self.root_path, self.config, selected_network=network_name, peers_file_path_key="peers_file_path", legacy_peer_db_path_key=PEER_DB_PATH_KEY_DEPRECATED, default_peers_file_path="db/peers.dat", ), self.config["introducer_peer"], dns_servers, self.config["peer_connect_interval"], self.config["selected_network"], default_port, self.log, ) except Exception as e: error_stack = traceback.format_exc() self.log.error(f"Exception: {e}") self.log.error(f"Exception in peer discovery: {e}") self.log.error(f"Exception Stack: {error_stack}") def _state_changed(self, change: str, change_data: Dict[str, Any] = None): if self.state_changed_callback is not None: self.state_changed_callback(change, change_data) async def short_sync_batch(self, peer: ws.WSChiaConnection, start_height: uint32, target_height: uint32) -> bool: """ Tries to sync to a chain which is not too far in the future, by downloading batches of blocks. If the first block that we download is not connected to our chain, we return False and do an expensive long sync instead. Long sync is not preferred because it requires downloading and validating a weight proof. Args: peer: peer to sync from start_height: height that we should start downloading at. (Our peak is higher) target_height: target to sync to Returns: False if the fork point was not found, and we need to do a long sync. True otherwise. """ # Don't trigger multiple batch syncs to the same peer if ( peer.peer_node_id in self.sync_store.backtrack_syncing and self.sync_store.backtrack_syncing[peer.peer_node_id] > 0 ): return True # Don't batch sync, we are already in progress of a backtrack sync if peer.peer_node_id in self.sync_store.batch_syncing: return True # Don't trigger a long sync self.sync_store.batch_syncing.add(peer.peer_node_id) self.log.info(f"Starting batch short sync from {start_height} to height {target_height}") if start_height > 0: first = await peer.request_block(full_node_protocol.RequestBlock(uint32(start_height), False)) if first is None or not isinstance(first, full_node_protocol.RespondBlock): self.sync_store.batch_syncing.remove(peer.peer_node_id) raise ValueError(f"Error short batch syncing, could not fetch block at height {start_height}") if not self.blockchain.contains_block(first.block.prev_header_hash): self.log.info("Batch syncing stopped, this is a deep chain") self.sync_store.batch_syncing.remove(peer.peer_node_id) # First sb not connected to our blockchain, do a long sync instead return False batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS if self._segment_task is not None and (not self._segment_task.done()): try: self._segment_task.cancel() except Exception as e: self.log.warning(f"failed to cancel segment task {e}") self._segment_task = None try: for height in range(start_height, target_height, batch_size): end_height = min(target_height, height + batch_size) request = RequestBlocks(uint32(height), uint32(end_height), True) response = await peer.request_blocks(request) if not response: raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}") async with self._blockchain_lock_high_priority: success, advanced_peak, fork_height, coin_changes = await self.receive_block_batch( response.blocks, peer, None ) if not success: raise ValueError(f"Error short batch syncing, failed to 
validate blocks {height}-{end_height}") if advanced_peak: peak = self.blockchain.get_peak() try: peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak() assert peak is not None and peak_fb is not None and fork_height is not None mempool_new_peak_result, fns_peak_result = await self.peak_post_processing( peak_fb, peak, fork_height, peer, coin_changes[0] ) await self.peak_post_processing_2( peak_fb, peak, fork_height, peer, coin_changes, mempool_new_peak_result, fns_peak_result ) except asyncio.CancelledError: # Still do post processing after cancel peak_fb = await self.blockchain.get_full_peak() assert peak is not None and peak_fb is not None and fork_height is not None await self.peak_post_processing(peak_fb, peak, fork_height, peer, coin_changes[0]) raise finally: self.log.info(f"Added blocks {height}-{end_height}") except (asyncio.CancelledError, Exception) as e: self.sync_store.batch_syncing.remove(peer.peer_node_id) raise e self.sync_store.batch_syncing.remove(peer.peer_node_id) return True async def short_sync_backtrack( self, peer: ws.WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32 ): """ Performs a backtrack sync, where blocks are downloaded one at a time from newest to oldest. If we do not find the fork point 5 deeper than our peak, we return False and do a long sync instead. Args: peer: peer to sync from peak_height: height of our peak target_height: target height target_unf_hash: partial hash of the unfinished block of the target Returns: True iff we found the fork point, and we do not need to long sync. """ try: if peer.peer_node_id not in self.sync_store.backtrack_syncing: self.sync_store.backtrack_syncing[peer.peer_node_id] = 0 self.sync_store.backtrack_syncing[peer.peer_node_id] += 1 unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash) curr_height: int = target_height found_fork_point = False responses = [] while curr_height > peak_height - 5: # If we already have the unfinished block, don't fetch the transactions. In the normal case, we will # already have the unfinished block, from when it was broadcast, so we just need to download the header, # but not the transactions fetch_tx: bool = unfinished_block is None or curr_height != target_height curr = await peer.request_block(full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx)) if curr is None: raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, timed out") if curr is None or not isinstance(curr, full_node_protocol.RespondBlock): raise ValueError( f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, wrong type {type(curr)}" ) responses.append(curr) if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0: found_fork_point = True break curr_height -= 1 if found_fork_point: for response in reversed(responses): await self.respond_block(response, peer) except (asyncio.CancelledError, Exception) as e: self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1 raise e self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1 return found_fork_point async def _refresh_ui_connections(self, sleep_before: float = 0): if sleep_before > 0: await asyncio.sleep(sleep_before) self._state_changed("peer_changed_peak") async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection): """ We have received a notification of a new peak from a peer. 
This happens either when we have just connected, or when the peer has updated their peak. Args: request: information about the new peak peer: peer that sent the message """ try: seen_header_hash = self.sync_store.seen_header_hash(request.header_hash) # Updates heights in the UI. Sleeps 1.5s before, so other peers have time to update their peaks as well. # Limit to 3 refreshes. if not seen_header_hash and len(self._ui_tasks) < 3: self._ui_tasks.add(asyncio.create_task(self._refresh_ui_connections(1.5))) # Prune completed connect tasks self._ui_tasks = set(filter(lambda t: not t.done(), self._ui_tasks)) except Exception as e: self.log.warning(f"Exception UI refresh task: {e}") # Store this peak/peer combination in case we want to sync to it, and to keep track of peers self.sync_store.peer_has_block(request.header_hash, peer.peer_node_id, request.weight, request.height, True) if self.blockchain.contains_block(request.header_hash): return None # Not interested in less heavy peaks peak: Optional[BlockRecord] = self.blockchain.get_peak() curr_peak_height = uint32(0) if peak is None else peak.height if peak is not None and peak.weight > request.weight: return None if self.sync_store.get_sync_mode(): # If peer connects while we are syncing, check if they have the block we are syncing towards peak_sync_hash = self.sync_store.get_sync_target_hash() peak_sync_height = self.sync_store.get_sync_target_height() if peak_sync_hash is not None and request.header_hash != peak_sync_hash and peak_sync_height is not None: peak_peers: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_sync_hash]) # Don't ask if we already know this peer has the peak if peer.peer_node_id not in peak_peers: target_peak_response: Optional[RespondBlock] = await peer.request_block( full_node_protocol.RequestBlock(uint32(peak_sync_height), False), timeout=10 ) if target_peak_response is not None and isinstance(target_peak_response, RespondBlock): self.sync_store.peer_has_block( peak_sync_hash, peer.peer_node_id, target_peak_response.block.weight, peak_sync_height, False, ) else: if request.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]: # This is the normal case of receiving the next block if await self.short_sync_backtrack( peer, curr_peak_height, request.height, request.unfinished_reward_block_hash ): return None if request.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS: # This is the case of syncing up more than a few blocks, at the start of the chain self.log.debug("Doing batch sync, no backup") await self.short_sync_batch(peer, uint32(0), request.height) return None if request.height < curr_peak_height + self.config["sync_blocks_behind_threshold"]: # This case of being behind but not by so much if await self.short_sync_batch(peer, uint32(max(curr_peak_height - 6, 0)), request.height): return None # This is the either the case where we were not able to sync successfully (for example, due to the fork # point being in the past), or we are very far behind. Performs a long sync. 
self._sync_task = asyncio.create_task(self._sync()) async def send_peak_to_timelords( self, peak_block: Optional[FullBlock] = None, peer: Optional[ws.WSChiaConnection] = None ): """ Sends current peak to timelords """ if peak_block is None: peak_block = await self.blockchain.get_full_peak() if peak_block is not None: peak = self.blockchain.block_record(peak_block.header_hash) difficulty = self.blockchain.get_next_difficulty(peak.header_hash, False) ses: Optional[SubEpochSummary] = next_sub_epoch_summary( self.constants, self.blockchain, peak.required_iters, peak_block, True, ) recent_rc = self.blockchain.get_recent_reward_challenges() curr = peak while not curr.is_challenge_block(self.constants) and not curr.first_in_sub_slot: curr = self.blockchain.block_record(curr.prev_hash) if curr.is_challenge_block(self.constants): last_csb_or_eos = curr.total_iters else: last_csb_or_eos = curr.ip_sub_slot_total_iters(self.constants) curr = peak passed_ses_height_but_not_yet_included = True while (curr.height % self.constants.SUB_EPOCH_BLOCKS) != 0: if curr.sub_epoch_summary_included: passed_ses_height_but_not_yet_included = False curr = self.blockchain.block_record(curr.prev_hash) if curr.sub_epoch_summary_included or curr.height == 0: passed_ses_height_but_not_yet_included = False timelord_new_peak: timelord_protocol.NewPeakTimelord = timelord_protocol.NewPeakTimelord( peak_block.reward_chain_block, difficulty, peak.deficit, peak.sub_slot_iters, ses, recent_rc, last_csb_or_eos, passed_ses_height_but_not_yet_included, ) msg = make_msg(ProtocolMessageTypes.new_peak_timelord, timelord_new_peak) if peer is None: await self.server.send_to_all([msg], NodeType.TIMELORD) else: await self.server.send_to_specific([msg], peer.peer_node_id) async def synced(self) -> bool: curr: Optional[BlockRecord] = self.blockchain.get_peak() if curr is None: return False while curr is not None and not curr.is_transaction_block: curr = self.blockchain.try_block_record(curr.prev_hash) now = time.time() if ( curr is None or curr.timestamp is None or curr.timestamp < uint64(int(now - 60 * 7)) or self.sync_store.get_sync_mode() ): return False else: return True async def on_connect(self, connection: ws.WSChiaConnection): """ Whenever we connect to another node / wallet, send them our current heads. Also send heads to farmers and challenges to timelords. 
""" self._state_changed("add_connection") self._state_changed("sync_mode") if self.full_node_peers is not None: asyncio.create_task(self.full_node_peers.on_connect(connection)) if self.initialized is False: return None if connection.connection_type is NodeType.FULL_NODE: # Send filter to node and request mempool items that are not in it (Only if we are currently synced) synced = await self.synced() peak_height = self.blockchain.get_peak_height() if synced and peak_height is not None: my_filter = self.mempool_manager.get_filter() mempool_request = full_node_protocol.RequestMempoolTransactions(my_filter) msg = make_msg(ProtocolMessageTypes.request_mempool_transactions, mempool_request) await connection.send_message(msg) peak_full: Optional[FullBlock] = await self.blockchain.get_full_peak() if peak_full is not None: peak: BlockRecord = self.blockchain.block_record(peak_full.header_hash) if connection.connection_type is NodeType.FULL_NODE: request_node = full_node_protocol.NewPeak( peak.header_hash, peak.height, peak.weight, peak.height, peak_full.reward_chain_block.get_unfinished().get_hash(), ) await connection.send_message(make_msg(ProtocolMessageTypes.new_peak, request_node)) elif connection.connection_type is NodeType.WALLET: # If connected to a wallet, send the Peak request_wallet = wallet_protocol.NewPeakWallet( peak.header_hash, peak.height, peak.weight, peak.height, ) await connection.send_message(make_msg(ProtocolMessageTypes.new_peak_wallet, request_wallet)) elif connection.connection_type is NodeType.TIMELORD: await self.send_peak_to_timelords() def on_disconnect(self, connection: ws.WSChiaConnection): self.log.info(f"peer disconnected {connection.get_peer_logging()}") self._state_changed("close_connection") self._state_changed("sync_mode") if self.sync_store is not None: self.sync_store.peer_disconnected(connection.peer_node_id) self.remove_subscriptions(connection) def remove_subscriptions(self, peer: ws.WSChiaConnection): # Remove all ph | coin id subscription for this peer node_id = peer.peer_node_id if node_id in self.peer_puzzle_hash: puzzle_hashes = self.peer_puzzle_hash[node_id] for ph in puzzle_hashes: if ph in self.ph_subscriptions: if node_id in self.ph_subscriptions[ph]: self.ph_subscriptions[ph].remove(node_id) if node_id in self.peer_coin_ids: coin_ids = self.peer_coin_ids[node_id] for coin_id in coin_ids: if coin_id in self.coin_subscriptions: if node_id in self.coin_subscriptions[coin_id]: self.coin_subscriptions[coin_id].remove(node_id) if peer.peer_node_id in self.peer_sub_counter: self.peer_sub_counter.pop(peer.peer_node_id) def _num_needed_peers(self) -> int: assert self.server is not None assert self.server.all_connections is not None diff = self.config["target_peer_count"] - len(self.server.all_connections) return diff if diff >= 0 else 0 def _close(self): self._shut_down = True if self._init_weight_proof is not None: self._init_weight_proof.cancel() # blockchain is created in _start and in certain cases it may not exist here during _close if hasattr(self, "blockchain"): self.blockchain.shut_down() # same for mempool_manager if hasattr(self, "mempool_manager"): self.mempool_manager.shut_down() if self.full_node_peers is not None: asyncio.create_task(self.full_node_peers.close()) if self.uncompact_task is not None: self.uncompact_task.cancel() if self._transaction_queue_task is not None: self._transaction_queue_task.cancel() if hasattr(self, "_blockchain_lock_queue"): self._blockchain_lock_queue.close() async def _await_closed(self): 
cancel_task_safe(self._sync_task, self.log) for task_id, task in list(self.full_node_store.tx_fetch_tasks.items()): cancel_task_safe(task, self.log) await self.connection.close() if self._init_weight_proof is not None: await asyncio.wait([self._init_weight_proof]) if hasattr(self, "_blockchain_lock_queue"): await self._blockchain_lock_queue.await_closed() async def _sync(self): """ Performs a full sync of the blockchain up to the peak. - Wait a few seconds for peers to send us their peaks - Select the heaviest peak, and request a weight proof from a peer with that peak - Validate the weight proof, and disconnect from the peer if invalid - Find the fork point to see where to start downloading blocks - Download blocks in batch (and in parallel) and verify them one at a time - Disconnect peers that provide invalid blocks or don't have the blocks """ if self.weight_proof_handler is None: return None # Ensure we are only syncing once and not double calling this method if self.sync_store.get_sync_mode(): return None if self.sync_store.get_long_sync(): self.log.debug("already in long sync") return None self.sync_store.set_long_sync(True) self.log.debug("long sync started") try: self.log.info("Starting to perform sync.") self.log.info("Waiting to receive peaks from peers.") # Wait until we have 3 peaks or up to a max of 30 seconds peaks = [] for i in range(300): peaks = [tup[0] for tup in self.sync_store.get_peak_of_each_peer().values()] if len(self.sync_store.get_peers_that_have_peak(peaks)) < 3: if self._shut_down: return None await asyncio.sleep(0.1) continue break self.log.info(f"Collected a total of {len(peaks)} peaks.") # Based on responses from peers about the current peaks, see which peak is the heaviest # (similar to longest chain rule). target_peak = self.sync_store.get_heaviest_peak() if target_peak is None: raise RuntimeError("Not performing sync, no peaks collected") heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight = target_peak self.sync_store.set_peak_target(heaviest_peak_hash, heaviest_peak_height) self.log.info(f"Selected peak {heaviest_peak_height}, {heaviest_peak_hash}") # Check which peers are updated to this height peers = [] coroutines = [] for peer in self.server.all_connections.values(): if peer.connection_type == NodeType.FULL_NODE: peers.append(peer.peer_node_id) coroutines.append( peer.request_block( full_node_protocol.RequestBlock(uint32(heaviest_peak_height), True), timeout=10 ) ) for i, target_peak_response in enumerate(await asyncio.gather(*coroutines)): if target_peak_response is not None and isinstance(target_peak_response, RespondBlock): self.sync_store.peer_has_block( heaviest_peak_hash, peers[i], heaviest_peak_weight, heaviest_peak_height, False ) # TODO: disconnect from peer which gave us the heaviest_peak, if nobody has the peak peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([heaviest_peak_hash]) peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids] # Request weight proof from a random peer self.log.info(f"Total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}") weight_proof_peer = random.choice(peers_with_peak) self.log.info( f"Requesting weight proof from peer {weight_proof_peer.peer_host} up to height" f" {heaviest_peak_height}" ) if self.blockchain.get_peak() is not None and heaviest_peak_weight <= self.blockchain.get_peak().weight: raise ValueError("Not performing sync, already caught up.") wp_timeout = 360 if "weight_proof_timeout" in self.config: 
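                # Optional operator override; the 360-second default is assigned just above.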
                wp_timeout = self.config["weight_proof_timeout"]
            self.log.debug(f"weight proof timeout is {wp_timeout} sec")
            request = full_node_protocol.RequestProofOfWeight(heaviest_peak_height, heaviest_peak_hash)
            response = await weight_proof_peer.request_proof_of_weight(request, timeout=wp_timeout)

            # Disconnect from this peer, because they have not behaved properly
            if response is None or not isinstance(response, full_node_protocol.RespondProofOfWeight):
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof did not arrive in time from peer: {weight_proof_peer.peer_host}")
            if response.wp.recent_chain_data[-1].reward_chain_block.height != heaviest_peak_height:
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof had the wrong height: {weight_proof_peer.peer_host}")
            if response.wp.recent_chain_data[-1].reward_chain_block.weight != heaviest_peak_weight:
                await weight_proof_peer.close(600)
                raise RuntimeError(f"Weight proof had the wrong weight: {weight_proof_peer.peer_host}")

            # Don't sync to this weight proof if our local peak is heavier,
            # and don't ban the peer: we asked for this peak
            current_peak = self.blockchain.get_peak()
            if current_peak is not None:
                if response.wp.recent_chain_data[-1].reward_chain_block.weight <= current_peak.weight:
                    raise RuntimeError(
                        f"Current peak is heavier than the weight proof peak: {weight_proof_peer.peer_host}"
                    )

            try:
                validated, fork_point, summaries = await self.weight_proof_handler.validate_weight_proof(response.wp)
            except Exception as e:
                await weight_proof_peer.close(600)
                raise ValueError(f"Weight proof validation threw an error {e}")

            if not validated:
                await weight_proof_peer.close(600)
                raise ValueError("Weight proof validation failed")

            self.log.info(f"Re-checked peers: total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")
            self.sync_store.set_sync_mode(True)
            self._state_changed("sync_mode")
            # Ensures that the fork point does not change
            async with self._blockchain_lock_high_priority:
                await self.blockchain.warmup(fork_point)
                await self.sync_from_fork_point(fork_point, heaviest_peak_height, heaviest_peak_hash, summaries)
        except asyncio.CancelledError:
            self.log.warning("Syncing failed, CancelledError")
        except Exception as e:
            tb = traceback.format_exc()
            self.log.error(f"Error with syncing: {type(e)}{tb}")
        finally:
            if self._shut_down:
                return None
            await self._finish_sync()

    async def sync_from_fork_point(
        self,
        fork_point_height: uint32,
        target_peak_sb_height: uint32,
        peak_hash: bytes32,
        summaries: List[SubEpochSummary],
    ):
        buffer_size = 4
        self.log.info(f"Start syncing from fork point at {fork_point_height} up to {target_peak_sb_height}")
        peers_with_peak = self.get_peers_with_peak(peak_hash)
        fork_point_height = await check_fork_next_block(
            self.blockchain, fork_point_height, peers_with_peak, node_next_block_check
        )
        batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS

        async def fetch_block_batches(batch_queue, peers_with_peak: List[ws.WSChiaConnection]):
            try:
                for start_height in range(fork_point_height, target_peak_sb_height, batch_size):
                    end_height = min(target_peak_sb_height, start_height + batch_size)
                    request = RequestBlocks(uint32(start_height), uint32(end_height), True)
                    fetched = False
                    for peer in random.sample(peers_with_peak, len(peers_with_peak)):
                        if peer.closed:
                            peers_with_peak.remove(peer)
                            continue
                        response = await peer.request_blocks(request, timeout=30)
                        if response is None:
                            await peer.close()
                            peers_with_peak.remove(peer)
                        elif isinstance(response, RespondBlocks):
                            await batch_queue.put((peer, response.blocks))
                            fetched = True
                            break
                    if fetched is False:
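                        # Every remaining peer timed out or disconnected for this range;
                        # push the None sentinel so the validator stops, and abort the sync.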
self.log.error(f"failed fetching {start_height} to {end_height} from peers") await batch_queue.put(None) return if self.sync_store.peers_changed.is_set(): peers_with_peak = self.get_peers_with_peak(peak_hash) self.sync_store.peers_changed.clear() except Exception as e: self.log.error(f"Exception fetching {start_height} to {end_height} from peer {e}") finally: # finished signal with None await batch_queue.put(None) async def validate_block_batches(batch_queue): advanced_peak = False while True: res = await batch_queue.get() if res is None: self.log.debug("done fetching blocks") return peer, blocks = res start_height = blocks[0].height end_height = blocks[-1].height success, advanced_peak, fork_height, coin_states = await self.receive_block_batch( blocks, peer, None if advanced_peak else uint32(fork_point_height), summaries ) if success is False: if peer in peers_with_peak: peers_with_peak.remove(peer) await peer.close(600) raise ValueError(f"Failed to validate block batch {start_height} to {end_height}") self.log.info(f"Added blocks {start_height} to {end_height}") await self.send_peak_to_wallets() peak = self.blockchain.get_peak() if len(coin_states) > 0 and fork_height is not None: await self.update_wallets(peak.height, fork_height, peak.header_hash, coin_states) self.blockchain.clean_block_record(end_height - self.constants.BLOCKS_CACHE_SIZE) loop = asyncio.get_event_loop() batch_queue: asyncio.Queue[Tuple[ws.WSChiaConnection, List[FullBlock]]] = asyncio.Queue( loop=loop, maxsize=buffer_size ) fetch_task = asyncio.Task(fetch_block_batches(batch_queue, peers_with_peak)) validate_task = asyncio.Task(validate_block_batches(batch_queue)) try: await asyncio.gather(fetch_task, validate_task) except Exception as e: assert validate_task.done() fetch_task.cancel() # no need to cancel validate_task, if we end up here validate_task is already done self.log.error(f"sync from fork point failed err: {e}") async def send_peak_to_wallets(self): peak = self.blockchain.get_peak() assert peak is not None msg = make_msg( ProtocolMessageTypes.new_peak_wallet, wallet_protocol.NewPeakWallet( peak.header_hash, peak.height, peak.weight, uint32(max(peak.height - 1, uint32(0))) ), ) await self.server.send_to_all([msg], NodeType.WALLET) def get_peers_with_peak(self, peak_hash: bytes32) -> List: peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_hash]) if len(peer_ids) == 0: self.log.warning(f"Not syncing, no peers with header_hash {peak_hash} ") return [] peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids] return peers_with_peak async def update_wallets( self, height: uint32, fork_height: uint32, peak_hash: bytes32, state_update: Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]], ): changes_for_peer: Dict[bytes32, Set[CoinState]] = {} states, hint_state = state_update for coin_record in states: if coin_record.name in self.coin_subscriptions: subscribed_peers = self.coin_subscriptions[coin_record.name] for peer in subscribed_peers: if peer not in changes_for_peer: changes_for_peer[peer] = set() changes_for_peer[peer].add(coin_record.coin_state) if coin_record.coin.puzzle_hash in self.ph_subscriptions: subscribed_peers = self.ph_subscriptions[coin_record.coin.puzzle_hash] for peer in subscribed_peers: if peer not in changes_for_peer: changes_for_peer[peer] = set() changes_for_peer[peer].add(coin_record.coin_state) # This is just a verification that the assumptions justifying the ignore below # are valid. 
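        # Hints are opaque bytes taken from the chain, so only 32-byte hints can
        # collide with the bytes32 puzzle-hash keys in self.ph_subscriptions.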
hint: bytes for hint, records in hint_state.items(): # While `hint` is typed as a `bytes`, and this is locally verified # immediately above, if it has length 32 then it might match an entry in # `self.ph_subscriptions`. It is unclear if there is a more proper means # of handling this situation. subscribed_peers = self.ph_subscriptions.get(hint) # type: ignore[call-overload] if subscribed_peers is not None: for peer in subscribed_peers: if peer not in changes_for_peer: changes_for_peer[peer] = set() for record in records.values(): changes_for_peer[peer].add(record.coin_state) for peer, changes in changes_for_peer.items(): if peer not in self.server.all_connections: continue ws_peer: ws.WSChiaConnection = self.server.all_connections[peer] state = CoinStateUpdate(height, fork_height, peak_hash, list(changes)) msg = make_msg(ProtocolMessageTypes.coin_state_update, state) await ws_peer.send_message(msg) async def receive_block_batch( self, all_blocks: List[FullBlock], peer: ws.WSChiaConnection, fork_point: Optional[uint32], wp_summaries: Optional[List[SubEpochSummary]] = None, ) -> Tuple[bool, bool, Optional[uint32], Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]]]: advanced_peak = False fork_height: Optional[uint32] = uint32(0) blocks_to_validate: List[FullBlock] = [] for i, block in enumerate(all_blocks): if not self.blockchain.contains_block(block.header_hash): blocks_to_validate = all_blocks[i:] break if len(blocks_to_validate) == 0: return True, False, fork_height, ([], {}) # Validates signatures in multiprocessing since they take a while, and we don't have cached transactions # for these blocks (unlike during normal operation where we validate one at a time) pre_validate_start = time.time() pre_validation_results: List[PreValidationResult] = await self.blockchain.pre_validate_blocks_multiprocessing( blocks_to_validate, {}, wp_summaries=wp_summaries, validate_signatures=True ) pre_validate_end = time.time() if pre_validate_end - pre_validate_start > 10: self.log.warning(f"Block pre-validation time: {pre_validate_end - pre_validate_start:0.2f} seconds") else: self.log.debug(f"Block pre-validation time: {pre_validate_end - pre_validate_start:0.2f} seconds") for i, block in enumerate(blocks_to_validate): if pre_validation_results[i].error is not None: self.log.error( f"Invalid block from peer: {peer.get_peer_logging()} {Err(pre_validation_results[i].error)}" ) return False, advanced_peak, fork_height, ([], {}) # Dicts because deduping all_coin_changes: Dict[bytes32, CoinRecord] = {} all_hint_changes: Dict[bytes, Dict[bytes32, CoinRecord]] = {} for i, block in enumerate(blocks_to_validate): assert pre_validation_results[i].required_iters is not None result, error, fork_height, coin_changes = await self.blockchain.receive_block( block, pre_validation_results[i], None if advanced_peak else fork_point ) coin_record_list, hint_records = coin_changes # Update all changes for record in coin_record_list: all_coin_changes[record.name] = record for hint, list_of_records in hint_records.items(): if hint not in all_hint_changes: all_hint_changes[hint] = {} for record in list_of_records.values(): all_hint_changes[hint][record.name] = record if result == ReceiveBlockResult.NEW_PEAK: advanced_peak = True elif result == ReceiveBlockResult.INVALID_BLOCK or result == ReceiveBlockResult.DISCONNECTED_BLOCK: if error is not None: self.log.error(f"Error: {error}, Invalid block from peer: {peer.get_peer_logging()} ") return False, advanced_peak, fork_height, ([], {}) block_record = 
self.blockchain.block_record(block.header_hash) if block_record.sub_epoch_summary_included is not None: if self.weight_proof_handler is not None: await self.weight_proof_handler.create_prev_sub_epoch_segments() if advanced_peak: self._state_changed("new_peak") self.log.debug( f"Total time for {len(blocks_to_validate)} blocks: {time.time() - pre_validate_start}, " f"advanced: {advanced_peak}" ) return True, advanced_peak, fork_height, (list(all_coin_changes.values()), all_hint_changes) async def _finish_sync(self): """ Finalize sync by setting sync mode to False, clearing all sync information, and adding any final blocks that we have finalized recently. """ self.log.info("long sync done") self.sync_store.set_long_sync(False) self.sync_store.set_sync_mode(False) self._state_changed("sync_mode") if self.server is None: return None peak: Optional[BlockRecord] = self.blockchain.get_peak() async with self._blockchain_lock_high_priority: await self.sync_store.clear_sync_info() peak_fb: FullBlock = await self.blockchain.get_full_peak() if peak is not None: mempool_new_peak_result, fns_peak_result = await self.peak_post_processing( peak_fb, peak, max(peak.height - 1, 0), None, [] ) await self.peak_post_processing_2( peak_fb, peak, max(peak.height - 1, 0), None, ([], {}), mempool_new_peak_result, fns_peak_result ) if peak is not None and self.weight_proof_handler is not None: await self.weight_proof_handler.get_proof_of_weight(peak.header_hash) self._state_changed("block") def has_valid_pool_sig(self, block: Union[UnfinishedBlock, FullBlock]): if ( block.foliage.foliage_block_data.pool_target == PoolTarget(self.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0)) and block.foliage.prev_block_hash != self.constants.GENESIS_CHALLENGE and block.reward_chain_block.proof_of_space.pool_public_key is not None ): if not AugSchemeMPL.verify( block.reward_chain_block.proof_of_space.pool_public_key, bytes(block.foliage.foliage_block_data.pool_target), block.foliage.foliage_block_data.pool_signature, ): return False return True async def signage_point_post_processing( self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSChiaConnection, ip_sub_slot: Optional[EndOfSubSlotBundle], ): self.log.info( f"⏲️ Finished signage point {request.index_from_challenge}/" f"{self.constants.NUM_SPS_SUB_SLOT}: " f"CC: {request.challenge_chain_vdf.output.get_hash()} " f"RC: {request.reward_chain_vdf.output.get_hash()} " ) self.signage_point_times[request.index_from_challenge] = time.time() sub_slot_tuple = self.full_node_store.get_sub_slot(request.challenge_chain_vdf.challenge) prev_challenge: Optional[bytes32] if sub_slot_tuple is not None: prev_challenge = sub_slot_tuple[0].challenge_chain.challenge_chain_end_of_slot_vdf.challenge else: prev_challenge = None # Notify nodes of the new signage point broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot( prev_challenge, request.challenge_chain_vdf.challenge, request.index_from_challenge, request.reward_chain_vdf.challenge, ) msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast) await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id) peak = self.blockchain.get_peak() if peak is not None and peak.height > self.constants.MAX_SUB_SLOT_BLOCKS: sub_slot_iters = peak.sub_slot_iters difficulty = uint64(peak.weight - self.blockchain.block_record(peak.prev_hash).weight) # Makes sure to potentially update the difficulty if we are past the peak (into a new sub-slot) assert ip_sub_slot is not None if 
request.challenge_chain_vdf.challenge != ip_sub_slot.challenge_chain.get_hash(): next_difficulty = self.blockchain.get_next_difficulty(peak.header_hash, True) next_sub_slot_iters = self.blockchain.get_next_slot_iters(peak.header_hash, True) difficulty = next_difficulty sub_slot_iters = next_sub_slot_iters else: difficulty = self.constants.DIFFICULTY_STARTING sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING # Notify farmers of the new signage point broadcast_farmer = farmer_protocol.NewSignagePoint( request.challenge_chain_vdf.challenge, request.challenge_chain_vdf.output.get_hash(), request.reward_chain_vdf.output.get_hash(), difficulty, sub_slot_iters, request.index_from_challenge, ) msg = make_msg(ProtocolMessageTypes.new_signage_point, broadcast_farmer) await self.server.send_to_all([msg], NodeType.FARMER) async def peak_post_processing( self, block: FullBlock, record: BlockRecord, fork_height: uint32, peer: Optional[ws.WSChiaConnection], coin_changes: List[CoinRecord], ): """ Must be called under self.blockchain.lock. This updates the internal state of the full node with the latest peak information. It also notifies peers about the new peak. """ difficulty = self.blockchain.get_next_difficulty(record.header_hash, False) sub_slot_iters = self.blockchain.get_next_slot_iters(record.header_hash, False) self.log.info( f"🌱 Updated peak to height {record.height}, weight {record.weight}, " f"hh {record.header_hash}, " f"forked at {fork_height}, rh: {record.reward_infusion_new_challenge}, " f"total iters: {record.total_iters}, " f"overflow: {record.overflow}, " f"deficit: {record.deficit}, " f"difficulty: {difficulty}, " f"sub slot iters: {sub_slot_iters}, " f"Generator size: " f"{len(bytes(block.transactions_generator)) if block.transactions_generator else 'No tx'}, " f"Generator ref list size: " f"{len(block.transactions_generator_ref_list) if block.transactions_generator else 'No tx'}" ) sub_slots = await self.blockchain.get_sp_and_ip_sub_slots(record.header_hash) assert sub_slots is not None if not self.sync_store.get_sync_mode(): self.blockchain.clean_block_records() fork_block: Optional[BlockRecord] = None if fork_height != block.height - 1 and block.height != 0: # This is a reorg # TODO: address hint error and remove ignore # error: Argument 1 to "block_record" of "Blockchain" has incompatible type "Optional[bytes32]"; # expected "bytes32" [arg-type] fork_block = self.blockchain.block_record(self.blockchain.height_to_hash(fork_height)) # type: ignore[arg-type] # noqa: E501 fns_peak_result: FullNodeStorePeakResult = self.full_node_store.new_peak( record, block, sub_slots[0], sub_slots[1], fork_block, self.blockchain, ) if fns_peak_result.new_signage_points is not None and peer is not None: for index, sp in fns_peak_result.new_signage_points: assert ( sp.cc_vdf is not None and sp.cc_proof is not None and sp.rc_vdf is not None and sp.rc_proof is not None ) await self.signage_point_post_processing( RespondSignagePoint(index, sp.cc_vdf, sp.cc_proof, sp.rc_vdf, sp.rc_proof), peer, sub_slots[1] ) if sub_slots[1] is None: assert record.ip_sub_slot_total_iters(self.constants) == 0 # Ensure the signage point is also in the store, for consistency self.full_node_store.new_signage_point( record.signage_point_index, self.blockchain, record, record.sub_slot_iters, SignagePoint( block.reward_chain_block.challenge_chain_sp_vdf, block.challenge_chain_sp_proof, block.reward_chain_block.reward_chain_sp_vdf, block.reward_chain_sp_proof, ), skip_vdf_validation=True, ) # Update the mempool (returns 
successful pending transactions added to the mempool) mempool_new_peak_result: List[Tuple[SpendBundle, NPCResult, bytes32]] = await self.mempool_manager.new_peak( self.blockchain.get_peak(), coin_changes ) # Check if we detected a spent transaction, to load up our generator cache if block.transactions_generator is not None and self.full_node_store.previous_generator is None: generator_arg = detect_potential_template_generator(block.height, block.transactions_generator) if generator_arg: self.log.info(f"Saving previous generator for height {block.height}") self.full_node_store.previous_generator = generator_arg return mempool_new_peak_result, fns_peak_result async def peak_post_processing_2( self, block: FullBlock, record: BlockRecord, fork_height: uint32, peer: Optional[ws.WSChiaConnection], coin_changes: Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]], mempool_peak_result: List[Tuple[SpendBundle, NPCResult, bytes32]], fns_peak_result: FullNodeStorePeakResult, ): """ Does NOT need to be called under the blockchain lock. Handle other parts of post processing like communicating with peers """ for bundle, result, spend_name in mempool_peak_result: self.log.debug(f"Added transaction to mempool: {spend_name}") mempool_item = self.mempool_manager.get_mempool_item(spend_name) assert mempool_item is not None fees = mempool_item.fee assert fees >= 0 assert mempool_item.cost is not None new_tx = full_node_protocol.NewTransaction( spend_name, mempool_item.cost, fees, ) msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx) await self.server.send_to_all([msg], NodeType.FULL_NODE) # If there were pending end of slots that happen after this peak, broadcast them if they are added if fns_peak_result.added_eos is not None: broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot( fns_peak_result.added_eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge, fns_peak_result.added_eos.challenge_chain.get_hash(), uint8(0), fns_peak_result.added_eos.reward_chain.end_of_slot_vdf.challenge, ) msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast) await self.server.send_to_all([msg], NodeType.FULL_NODE) # TODO: maybe add and broadcast new IPs as well if record.height % 1000 == 0: # Occasionally clear data in full node store to keep memory usage small self.full_node_store.clear_seen_unfinished_blocks() self.full_node_store.clear_old_cache_entries() if self.sync_store.get_sync_mode() is False: await self.send_peak_to_timelords(block) # Tell full nodes about the new peak msg = make_msg( ProtocolMessageTypes.new_peak, full_node_protocol.NewPeak( record.header_hash, record.height, record.weight, fork_height, block.reward_chain_block.get_unfinished().get_hash(), ), ) if peer is not None: await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id) else: await self.server.send_to_all([msg], NodeType.FULL_NODE) # Tell wallets about the new peak msg = make_msg( ProtocolMessageTypes.new_peak_wallet, wallet_protocol.NewPeakWallet( record.header_hash, record.height, record.weight, fork_height, ), ) await self.update_wallets(record.height, fork_height, record.header_hash, coin_changes) await self.server.send_to_all([msg], NodeType.WALLET) self._state_changed("new_peak") async def respond_block( self, respond_block: full_node_protocol.RespondBlock, peer: Optional[ws.WSChiaConnection] = None, ) -> Optional[Message]: """ Receive a full block from a peer full node (or ourselves). 
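        The block is pre-validated and then added under the blockchain lock; if it
        becomes the new peak, the stores, the mempool, and connected peers/wallets
        are updated via peak_post_processing / peak_post_processing_2.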
""" block: FullBlock = respond_block.block if self.sync_store.get_sync_mode(): return None # Adds the block to seen, and check if it's seen before (which means header is in memory) header_hash = block.header_hash if self.blockchain.contains_block(header_hash): return None pre_validation_result: Optional[PreValidationResult] = None if ( block.is_transaction_block() and block.transactions_info is not None and block.transactions_info.generator_root != bytes([0] * 32) and block.transactions_generator is None ): # This is the case where we already had the unfinished block, and asked for this block without # the transactions (since we already had them). Therefore, here we add the transactions. unfinished_rh: bytes32 = block.reward_chain_block.get_unfinished().get_hash() unf_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(unfinished_rh) if ( unf_block is not None and unf_block.transactions_generator is not None and unf_block.foliage_transaction_block == block.foliage_transaction_block ): # We checked that the transaction block is the same, therefore all transactions and the signature # must be identical in the unfinished and finished blocks. We can therefore use the cache. pre_validation_result = self.full_node_store.get_unfinished_block_result(unfinished_rh) assert pre_validation_result is not None block = dataclasses.replace( block, transactions_generator=unf_block.transactions_generator, transactions_generator_ref_list=unf_block.transactions_generator_ref_list, ) else: # We still do not have the correct information for this block, perhaps there is a duplicate block # with the same unfinished block hash in the cache, so we need to fetch the correct one if peer is None: return None block_response: Optional[Any] = await peer.request_block( full_node_protocol.RequestBlock(block.height, True) ) if block_response is None or not isinstance(block_response, full_node_protocol.RespondBlock): self.log.warning( f"Was not able to fetch the correct block for height {block.height} {block_response}" ) return None new_block: FullBlock = block_response.block if new_block.foliage_transaction_block != block.foliage_transaction_block: self.log.warning(f"Received the wrong block for height {block.height} {new_block.header_hash}") return None assert new_block.transactions_generator is not None self.log.debug( f"Wrong info in the cache for bh {new_block.header_hash}, there might be multiple blocks from the " f"same farmer with the same pospace." 
) # This recursion ends here, we cannot recurse again because transactions_generator is not None return await self.respond_block(block_response, peer) coin_changes: Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]] = ([], {}) mempool_new_peak_result, fns_peak_result = None, None async with self._blockchain_lock_high_priority: # After acquiring the lock, check again, because another asyncio thread might have added it if self.blockchain.contains_block(header_hash): return None validation_start = time.time() # Tries to add the block to the blockchain, if we already validated transactions, don't do it again npc_results = {} if pre_validation_result is not None and pre_validation_result.npc_result is not None: npc_results[block.height] = pre_validation_result.npc_result # Don't validate signatures because we want to validate them in the main thread later, since we have a # cache available pre_validation_results = await self.blockchain.pre_validate_blocks_multiprocessing( [block], npc_results, validate_signatures=False ) added: Optional[ReceiveBlockResult] = None pre_validation_time = time.time() - validation_start try: if len(pre_validation_results) < 1: raise ValueError(f"Failed to validate block {header_hash} height {block.height}") if pre_validation_results[0].error is not None: if Err(pre_validation_results[0].error) == Err.INVALID_PREV_BLOCK_HASH: added = ReceiveBlockResult.DISCONNECTED_BLOCK error_code: Optional[Err] = Err.INVALID_PREV_BLOCK_HASH fork_height: Optional[uint32] = None else: raise ValueError( f"Failed to validate block {header_hash} height " f"{block.height}: {Err(pre_validation_results[0].error).name}" ) else: result_to_validate = ( pre_validation_results[0] if pre_validation_result is None else pre_validation_result ) assert result_to_validate.required_iters == pre_validation_results[0].required_iters added, error_code, fork_height, coin_changes = await self.blockchain.receive_block( block, result_to_validate, None ) if ( self.full_node_store.previous_generator is not None and fork_height is not None and fork_height < self.full_node_store.previous_generator.block_height ): self.full_node_store.previous_generator = None if added == ReceiveBlockResult.ALREADY_HAVE_BLOCK: return None elif added == ReceiveBlockResult.INVALID_BLOCK: assert error_code is not None self.log.error(f"Block {header_hash} at height {block.height} is invalid with code {error_code}.") raise ConsensusError(error_code, header_hash) elif added == ReceiveBlockResult.DISCONNECTED_BLOCK: self.log.info(f"Disconnected block {header_hash} at height {block.height}") return None elif added == ReceiveBlockResult.NEW_PEAK: # Only propagate blocks which extend the blockchain (becomes one of the heads) new_peak: Optional[BlockRecord] = self.blockchain.get_peak() assert new_peak is not None and fork_height is not None mempool_new_peak_result, fns_peak_result = await self.peak_post_processing( block, new_peak, fork_height, peer, coin_changes[0] ) elif added == ReceiveBlockResult.ADDED_AS_ORPHAN: self.log.info( f"Received orphan block of height {block.height} rh " f"{block.reward_chain_block.get_hash()}" ) else: # Should never reach here, all the cases are covered raise RuntimeError(f"Invalid result from receive_block {added}") except asyncio.CancelledError: # We need to make sure to always call this method even when we get a cancel exception, to make sure # the node stays in sync new_peak = self.blockchain.get_peak() if added == ReceiveBlockResult.NEW_PEAK: assert new_peak is not None assert fork_height is 
not None
                    await self.peak_post_processing(block, new_peak, fork_height, peer, coin_changes[0])
                raise

        validation_time = time.time() - validation_start

        if mempool_new_peak_result is not None:
            assert new_peak is not None
            assert fork_height is not None
            assert fns_peak_result is not None
            await self.peak_post_processing_2(
                block, new_peak, fork_height, peer, coin_changes, mempool_new_peak_result, fns_peak_result
            )

        percent_full_str = (
            (
                ", percent full: "
                + str(round(100.0 * float(block.transactions_info.cost) / self.constants.MAX_BLOCK_COST_CLVM, 3))
                + "%"
            )
            if block.transactions_info is not None
            else ""
        )
        self.log.log(
            logging.WARNING if validation_time > 2 else logging.DEBUG,
            f"Block validation time: {validation_time:0.2f} seconds, "
            f"pre_validation time: {pre_validation_time:0.2f} seconds, "
            f"cost: {block.transactions_info.cost if block.transactions_info is not None else 'None'}"
            f"{percent_full_str}",
        )

        # This code path is reached if added == ADDED_AS_ORPHAN or NEW_PEAK
        peak = self.blockchain.get_peak()
        assert peak is not None

        # Removes all temporary data for old blocks
        clear_height = uint32(max(0, peak.height - 50))
        self.full_node_store.clear_candidate_blocks_below(clear_height)
        self.full_node_store.clear_unfinished_blocks_below(clear_height)
        if peak.height % 1000 == 0 and not self.sync_store.get_sync_mode():
            await self.sync_store.clear_sync_info()  # Occasionally clear sync peer info

        state_changed_data: Dict[str, Any] = {
            "transaction_block": False,
            "k_size": block.reward_chain_block.proof_of_space.size,
            "header_hash": block.header_hash,
            "height": block.height,
        }

        if block.transactions_info is not None:
            state_changed_data["transaction_block"] = True
            state_changed_data["block_cost"] = block.transactions_info.cost
            state_changed_data["block_fees"] = block.transactions_info.fees

        if block.foliage_transaction_block is not None:
            state_changed_data["timestamp"] = block.foliage_transaction_block.timestamp

        if block.transactions_generator is not None:
            state_changed_data["transaction_generator_size_bytes"] = len(bytes(block.transactions_generator))
            state_changed_data["transaction_generator_ref_list"] = block.transactions_generator_ref_list

        if added is not None:
            state_changed_data["receive_block_result"] = added.value

        self._state_changed("block", state_changed_data)

        record = self.blockchain.block_record(block.header_hash)
        if self.weight_proof_handler is not None and record.sub_epoch_summary_included is not None:
            if self._segment_task is None or self._segment_task.done():
                self._segment_task = asyncio.create_task(self.weight_proof_handler.create_prev_sub_epoch_segments())
        return None

    async def respond_unfinished_block(
        self,
        respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
        peer: Optional[ws.WSChiaConnection],
        farmed_block: bool = False,
        block_bytes: Optional[bytes] = None,
    ):
        """
        We have received an unfinished block, either created by us, or from another peer.
        We can validate it and if it's a good block, propagate it to other peers and timelords.
        """
        block = respond_unfinished_block.unfinished_block
        receive_time = time.time()

        if block.prev_header_hash != self.constants.GENESIS_CHALLENGE and not self.blockchain.contains_block(
            block.prev_header_hash
        ):
            # No need to request the parent, since the peer will send it to us anyway, via NewPeak
            self.log.debug("Received a disconnected unfinished block")
            return None

        # Adds the unfinished block to seen, and checks if it has been seen before, to prevent
        # processing it twice.
This searches for the exact version of the unfinished block (there can be many different # foliages for the same trunk). This is intentional, to prevent DOS attacks. # Note that it does not require that this block was successfully processed if self.full_node_store.seen_unfinished_block(block.get_hash()): return None block_hash = block.reward_chain_block.get_hash() # This searched for the trunk hash (unfinished reward hash). If we have already added a block with the same # hash, return if self.full_node_store.get_unfinished_block(block_hash) is not None: return None peak: Optional[BlockRecord] = self.blockchain.get_peak() if peak is not None: if block.total_iters < peak.sp_total_iters(self.constants): # This means this unfinished block is pretty far behind, it will not add weight to our chain return None if block.prev_header_hash == self.constants.GENESIS_CHALLENGE: prev_b = None else: prev_b = self.blockchain.block_record(block.prev_header_hash) # Count the blocks in sub slot, and check if it's a new epoch if len(block.finished_sub_slots) > 0: num_blocks_in_ss = 1 # Curr else: curr = self.blockchain.try_block_record(block.prev_header_hash) num_blocks_in_ss = 2 # Curr and prev while (curr is not None) and not curr.first_in_sub_slot: curr = self.blockchain.try_block_record(curr.prev_hash) num_blocks_in_ss += 1 if num_blocks_in_ss > self.constants.MAX_SUB_SLOT_BLOCKS: # TODO: potentially allow overflow blocks here, which count for the next slot self.log.warning("Too many blocks added, not adding block") return None # The clvm generator and aggregate signature are validated outside of the lock, to allow other blocks and # transactions to get validated npc_result: Optional[NPCResult] = None pre_validation_time = None if block.transactions_generator is not None: pre_validation_start = time.time() assert block.transactions_info is not None try: block_generator: Optional[BlockGenerator] = await self.blockchain.get_block_generator(block) except ValueError: raise ConsensusError(Err.GENERATOR_REF_HAS_NO_GENERATOR) if block_generator is None: raise ConsensusError(Err.GENERATOR_REF_HAS_NO_GENERATOR) if block_bytes is None: block_bytes = bytes(block) height = uint32(0) if prev_b is None else uint32(prev_b.height + 1) npc_result = await self.blockchain.run_generator(block_bytes, block_generator, height) pre_validation_time = time.time() - pre_validation_start pairs_pks, pairs_msgs = pkm_pairs(npc_result.npc_list, self.constants.AGG_SIG_ME_ADDITIONAL_DATA) if not cached_bls.aggregate_verify( pairs_pks, pairs_msgs, block.transactions_info.aggregated_signature, True ): raise ConsensusError(Err.BAD_AGGREGATE_SIGNATURE) async with self._blockchain_lock_high_priority: # TODO: pre-validate VDFs outside of lock validation_start = time.time() validate_result = await self.blockchain.validate_unfinished_block(block, npc_result) if validate_result.error is not None: if validate_result.error == Err.COIN_AMOUNT_NEGATIVE.value: # TODO: remove in the future, hotfix for 1.1.5 peers to not disconnect older peers self.log.info(f"Consensus error {validate_result.error}, not disconnecting") return raise ConsensusError(Err(validate_result.error)) validation_time = time.time() - validation_start # respond_block will later use the cache (validated_signature=True) validate_result = dataclasses.replace(validate_result, validated_signature=True) assert validate_result.required_iters is not None # Perform another check, in case we have already concurrently added the same unfinished block if 
self.full_node_store.get_unfinished_block(block_hash) is not None: return None if block.prev_header_hash == self.constants.GENESIS_CHALLENGE: height = uint32(0) else: height = uint32(self.blockchain.block_record(block.prev_header_hash).height + 1) ses: Optional[SubEpochSummary] = next_sub_epoch_summary( self.constants, self.blockchain, validate_result.required_iters, block, True, ) self.full_node_store.add_unfinished_block(height, block, validate_result) pre_validation_log = ( f"pre_validation time {pre_validation_time:0.4f}, " if pre_validation_time is not None else "" ) if farmed_block is True: self.log.info( f"🍀 ️Farmed unfinished_block {block_hash}, SP: {block.reward_chain_block.signage_point_index}, " f"validation time: {validation_time:0.4f} seconds, {pre_validation_log}" f"cost: {block.transactions_info.cost if block.transactions_info else 'None'} " ) else: percent_full_str = ( ( ", percent full: " + str(round(100.0 * float(block.transactions_info.cost) / self.constants.MAX_BLOCK_COST_CLVM, 3)) + "%" ) if block.transactions_info is not None else "" ) self.log.info( f"Added unfinished_block {block_hash}, not farmed by us," f" SP: {block.reward_chain_block.signage_point_index} farmer response time: " f"{receive_time - self.signage_point_times[block.reward_chain_block.signage_point_index]:0.4f}, " f"Pool pk {encode_puzzle_hash(block.foliage.foliage_block_data.pool_target.puzzle_hash, 'xch')}, " f"validation time: {validation_time:0.4f} seconds, {pre_validation_log}" f"cost: {block.transactions_info.cost if block.transactions_info else 'None'}" f"{percent_full_str}" ) sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty( self.constants, len(block.finished_sub_slots) > 0, prev_b, self.blockchain, ) if block.reward_chain_block.signage_point_index == 0: res = self.full_node_store.get_sub_slot(block.reward_chain_block.pos_ss_cc_challenge_hash) if res is None: if block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE: rc_prev = self.constants.GENESIS_CHALLENGE else: self.log.warning(f"Do not have sub slot {block.reward_chain_block.pos_ss_cc_challenge_hash}") return None else: rc_prev = res[0].reward_chain.get_hash() else: assert block.reward_chain_block.reward_chain_sp_vdf is not None rc_prev = block.reward_chain_block.reward_chain_sp_vdf.challenge timelord_request = timelord_protocol.NewUnfinishedBlockTimelord( block.reward_chain_block, difficulty, sub_slot_iters, block.foliage, ses, rc_prev, ) timelord_msg = make_msg(ProtocolMessageTypes.new_unfinished_block_timelord, timelord_request) await self.server.send_to_all([timelord_msg], NodeType.TIMELORD) full_node_request = full_node_protocol.NewUnfinishedBlock(block.reward_chain_block.get_hash()) msg = make_msg(ProtocolMessageTypes.new_unfinished_block, full_node_request) if peer is not None: await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id) else: await self.server.send_to_all([msg], NodeType.FULL_NODE) self._state_changed("unfinished_block") async def new_infusion_point_vdf( self, request: timelord_protocol.NewInfusionPointVDF, timelord_peer: Optional[ws.WSChiaConnection] = None ) -> Optional[Message]: # Lookup unfinished blocks unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block( request.unfinished_reward_hash ) if unfinished_block is None: self.log.warning( f"Do not have unfinished reward chain block {request.unfinished_reward_hash}, cannot finish." 
) return None prev_b: Optional[BlockRecord] = None target_rc_hash = request.reward_chain_ip_vdf.challenge last_slot_cc_hash = request.challenge_chain_ip_vdf.challenge # Backtracks through end of slot objects, should work for multiple empty sub slots for eos, _, _ in reversed(self.full_node_store.finished_sub_slots): if eos is not None and eos.reward_chain.get_hash() == target_rc_hash: target_rc_hash = eos.reward_chain.end_of_slot_vdf.challenge if target_rc_hash == self.constants.GENESIS_CHALLENGE: prev_b = None else: # Find the prev block, starts looking backwards from the peak. target_rc_hash must be the hash of a block # and not an end of slot (since we just looked through the slots and backtracked) curr: Optional[BlockRecord] = self.blockchain.get_peak() for _ in range(10): if curr is None: break if curr.reward_infusion_new_challenge == target_rc_hash: # Found our prev block prev_b = curr break curr = self.blockchain.try_block_record(curr.prev_hash) # If not found, cache keyed on prev block if prev_b is None: self.full_node_store.add_to_future_ip(request) self.log.warning(f"Previous block is None, infusion point {request.reward_chain_ip_vdf.challenge}") return None finished_sub_slots: Optional[List[EndOfSubSlotBundle]] = self.full_node_store.get_finished_sub_slots( self.blockchain, prev_b, last_slot_cc_hash, ) if finished_sub_slots is None: return None sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty( self.constants, len(finished_sub_slots) > 0, prev_b, self.blockchain, ) if unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE: sub_slot_start_iters = uint128(0) else: ss_res = self.full_node_store.get_sub_slot(unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash) if ss_res is None: self.log.warning(f"Do not have sub slot {unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash}") return None _, _, sub_slot_start_iters = ss_res sp_total_iters = uint128( sub_slot_start_iters + calculate_sp_iters( self.constants, sub_slot_iters, unfinished_block.reward_chain_block.signage_point_index, ) ) block: FullBlock = unfinished_block_to_full_block( unfinished_block, request.challenge_chain_ip_vdf, request.challenge_chain_ip_proof, request.reward_chain_ip_vdf, request.reward_chain_ip_proof, request.infused_challenge_chain_ip_vdf, request.infused_challenge_chain_ip_proof, finished_sub_slots, prev_b, self.blockchain, sp_total_iters, difficulty, ) if not self.has_valid_pool_sig(block): self.log.warning("Trying to make a pre-farm block but height is not 0") return None try: await self.respond_block(full_node_protocol.RespondBlock(block)) except Exception as e: self.log.warning(f"Consensus error validating block: {e}") if timelord_peer is not None: # Only sends to the timelord who sent us this VDF, to reset them to the correct peak await self.send_peak_to_timelords(peer=timelord_peer) return None async def respond_end_of_sub_slot( self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection ) -> Tuple[Optional[Message], bool]: fetched_ss = self.full_node_store.get_sub_slot(request.end_of_slot_bundle.challenge_chain.get_hash()) # We are not interested in sub-slots which have the same challenge chain but different reward chain. If there # is a reorg, we will find out through the broadcast of blocks instead. 
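        # The store is keyed by the challenge-chain hash, so a hit here means this
        # exact challenge chain was already added, regardless of its reward chain.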
if fetched_ss is not None: # Already have the sub-slot return None, True async with self.timelord_lock: fetched_ss = self.full_node_store.get_sub_slot( request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge ) if ( (fetched_ss is None) and request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge != self.constants.GENESIS_CHALLENGE ): # If we don't have the prev, request the prev instead full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot( request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge, uint8(0), bytes32([0] * 32), ) return ( make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request), False, ) peak = self.blockchain.get_peak() if peak is not None and peak.height > 2: next_sub_slot_iters = self.blockchain.get_next_slot_iters(peak.header_hash, True) next_difficulty = self.blockchain.get_next_difficulty(peak.header_hash, True) else: next_sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING next_difficulty = self.constants.DIFFICULTY_STARTING # Adds the sub slot and potentially get new infusions new_infusions = self.full_node_store.new_finished_sub_slot( request.end_of_slot_bundle, self.blockchain, peak, await self.blockchain.get_full_peak(), ) # It may be an empty list, even if it's not None. Not None means added successfully if new_infusions is not None: self.log.info( f"⏲️ Finished sub slot, SP {self.constants.NUM_SPS_SUB_SLOT}/{self.constants.NUM_SPS_SUB_SLOT}, " f"{request.end_of_slot_bundle.challenge_chain.get_hash()}, " f"number of sub-slots: {len(self.full_node_store.finished_sub_slots)}, " f"RC hash: {request.end_of_slot_bundle.reward_chain.get_hash()}, " f"Deficit {request.end_of_slot_bundle.reward_chain.deficit}" ) # Notify full nodes of the new sub-slot broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot( request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge, request.end_of_slot_bundle.challenge_chain.get_hash(), uint8(0), request.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge, ) msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast) await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id) for infusion in new_infusions: await self.new_infusion_point_vdf(infusion) # Notify farmers of the new sub-slot broadcast_farmer = farmer_protocol.NewSignagePoint( request.end_of_slot_bundle.challenge_chain.get_hash(), request.end_of_slot_bundle.challenge_chain.get_hash(), request.end_of_slot_bundle.reward_chain.get_hash(), next_difficulty, next_sub_slot_iters, uint8(0), ) msg = make_msg(ProtocolMessageTypes.new_signage_point, broadcast_farmer) await self.server.send_to_all([msg], NodeType.FARMER) return None, True else: self.log.info( f"End of slot not added CC challenge " f"{request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}" ) return None, False async def respond_transaction( self, transaction: SpendBundle, spend_name: bytes32, peer: Optional[ws.WSChiaConnection] = None, test: bool = False, tx_bytes: Optional[bytes] = None, ) -> Tuple[MempoolInclusionStatus, Optional[Err]]: if self.sync_store.get_sync_mode(): return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING if not test and not (await self.synced()): return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING if self.mempool_manager.seen(spend_name): return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION 
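        # Mark the spend as seen up front so concurrently gossiped copies of the same
        # bundle are deduplicated; it is removed from the seen set again on the
        # failure paths below.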
self.mempool_manager.add_and_maybe_pop_seen(spend_name) self.log.debug(f"Processing transaction: {spend_name}") # Ignore if syncing if self.sync_store.get_sync_mode(): status = MempoolInclusionStatus.FAILED error: Optional[Err] = Err.NO_TRANSACTIONS_WHILE_SYNCING self.mempool_manager.remove_seen(spend_name) else: try: cost_result = await self.mempool_manager.pre_validate_spendbundle(transaction, tx_bytes, spend_name) except ValidationError as e: self.mempool_manager.remove_seen(spend_name) return MempoolInclusionStatus.FAILED, e.code except Exception as e: self.mempool_manager.remove_seen(spend_name) raise e async with self._blockchain_lock_low_priority: if self.mempool_manager.get_spendbundle(spend_name) is not None: self.mempool_manager.remove_seen(spend_name) return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION cost, status, error = await self.mempool_manager.add_spendbundle(transaction, cost_result, spend_name) if status == MempoolInclusionStatus.SUCCESS: self.log.debug( f"Added transaction to mempool: {spend_name} mempool size: " f"{self.mempool_manager.mempool.total_mempool_cost} normalized " f"{self.mempool_manager.mempool.total_mempool_cost / 5000000}" ) # Only broadcast successful transactions, not pending ones. Otherwise it's a DOS # vector. mempool_item = self.mempool_manager.get_mempool_item(spend_name) assert mempool_item is not None fees = mempool_item.fee assert fees >= 0 assert cost is not None new_tx = full_node_protocol.NewTransaction( spend_name, cost, fees, ) msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx) if peer is None: await self.server.send_to_all([msg], NodeType.FULL_NODE) else: await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id) self.not_dropped_tx += 1 else: self.mempool_manager.remove_seen(spend_name) self.log.debug( f"Wasn't able to add transaction with id {spend_name}, " f"status {status} error: {error}" ) return status, error async def _needs_compact_proof( self, vdf_info: VDFInfo, header_block: HeaderBlock, field_vdf: CompressibleVDFField ) -> bool: if field_vdf == CompressibleVDFField.CC_EOS_VDF: for sub_slot in header_block.finished_sub_slots: if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info: if ( sub_slot.proofs.challenge_chain_slot_proof.witness_type == 0 and sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity ): return False return True if field_vdf == CompressibleVDFField.ICC_EOS_VDF: for sub_slot in header_block.finished_sub_slots: if ( sub_slot.infused_challenge_chain is not None and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info ): assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None if ( sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type == 0 and sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity ): return False return True if field_vdf == CompressibleVDFField.CC_SP_VDF: if header_block.reward_chain_block.challenge_chain_sp_vdf is None: return False if vdf_info == header_block.reward_chain_block.challenge_chain_sp_vdf: assert header_block.challenge_chain_sp_proof is not None if ( header_block.challenge_chain_sp_proof.witness_type == 0 and header_block.challenge_chain_sp_proof.normalized_to_identity ): return False return True if field_vdf == CompressibleVDFField.CC_IP_VDF: if vdf_info == header_block.reward_chain_block.challenge_chain_ip_vdf: if ( header_block.challenge_chain_ip_proof.witness_type == 0 and 
header_block.challenge_chain_ip_proof.normalized_to_identity ): return False return True return False async def _can_accept_compact_proof( self, vdf_info: VDFInfo, vdf_proof: VDFProof, height: uint32, header_hash: bytes32, field_vdf: CompressibleVDFField, ) -> bool: """ - Checks if the provided proof is indeed compact. - Checks if proof verifies given the vdf_info from the start of sub-slot. - Checks if the provided vdf_info is correct, assuming it refers to the start of sub-slot. - Checks if the existing proof was non-compact. Ignore this proof if we already have a compact proof. """ is_fully_compactified = await self.block_store.is_fully_compactified(header_hash) if is_fully_compactified is None or is_fully_compactified: self.log.info(f"Already compactified block: {header_hash}. Ignoring.") return False peak = self.blockchain.get_peak() if peak is None or peak.height - height < 5: self.log.debug("Will not compactify recent block") return False if vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity: self.log.error(f"Received vdf proof is not compact: {vdf_proof}.") return False if not vdf_proof.is_valid(self.constants, ClassgroupElement.get_default_element(), vdf_info): self.log.error(f"Received compact vdf proof is not valid: {vdf_proof}.") return False header_block = await self.blockchain.get_header_block_by_height(height, header_hash, tx_filter=False) if header_block is None: self.log.error(f"Can't find block for given compact vdf. Height: {height} Header hash: {header_hash}") return False is_new_proof = await self._needs_compact_proof(vdf_info, header_block, field_vdf) if not is_new_proof: self.log.info(f"Duplicate compact proof. Height: {height}. Header hash: {header_hash}.") return is_new_proof async def _replace_proof( self, vdf_info: VDFInfo, vdf_proof: VDFProof, height: uint32, field_vdf: CompressibleVDFField, ) -> bool: full_blocks = await self.block_store.get_full_blocks_at([height]) assert len(full_blocks) > 0 replaced = False expected_header_hash = self.blockchain.height_to_hash(height) for block in full_blocks: new_block = None if block.header_hash != expected_header_hash: continue block_record = await self.blockchain.get_block_record_from_db(expected_header_hash) assert block_record is not None if field_vdf == CompressibleVDFField.CC_EOS_VDF: for index, sub_slot in enumerate(block.finished_sub_slots): if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info: new_proofs = dataclasses.replace(sub_slot.proofs, challenge_chain_slot_proof=vdf_proof) new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs) new_finished_subslots = block.finished_sub_slots new_finished_subslots[index] = new_subslot new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots) break if field_vdf == CompressibleVDFField.ICC_EOS_VDF: for index, sub_slot in enumerate(block.finished_sub_slots): if ( sub_slot.infused_challenge_chain is not None and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info ): new_proofs = dataclasses.replace(sub_slot.proofs, infused_challenge_chain_slot_proof=vdf_proof) new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs) new_finished_subslots = block.finished_sub_slots new_finished_subslots[index] = new_subslot new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots) break if field_vdf == CompressibleVDFField.CC_SP_VDF: if block.reward_chain_block.challenge_chain_sp_vdf == vdf_info: assert block.challenge_chain_sp_proof is not None new_block = 
dataclasses.replace(block, challenge_chain_sp_proof=vdf_proof) if field_vdf == CompressibleVDFField.CC_IP_VDF: if block.reward_chain_block.challenge_chain_ip_vdf == vdf_info: new_block = dataclasses.replace(block, challenge_chain_ip_proof=vdf_proof) if new_block is None: continue async with self.db_wrapper.lock: peak: Optional[BlockRecord] = self.blockchain.get_peak() assert peak is not None if new_block.header_hash == peak.header_hash or peak.height - new_block.height < 5: continue main_chain_hash: Optional[bytes32] = self.blockchain.height_to_hash(new_block.height) assert main_chain_hash is not None in_main_chain: bool = main_chain_hash == new_block.header_hash try: await self.block_store.db_wrapper.begin_transaction() await self.block_store.add_full_block(new_block.header_hash, new_block, block_record, in_main_chain) await self.block_store.db_wrapper.commit_transaction() replaced = True except BaseException as e: await self.block_store.db_wrapper.rollback_transaction() self.log.error( f"_replace_proof error while adding block {block.header_hash} height {block.height}," f" rolling back: {e} {traceback.format_exc()}" ) raise return replaced async def respond_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime): field_vdf = CompressibleVDFField(int(request.field_vdf)) if not await self._can_accept_compact_proof( request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf ): return None async with self.blockchain.compact_proof_lock: replaced = await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf) if not replaced: self.log.error(f"Could not replace compact proof: {request.height}") return None self.log.info(f"Replaced compact proof at height {request.height}") msg = make_msg( ProtocolMessageTypes.new_compact_vdf, full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info), ) if self.server is not None: await self.server.send_to_all([msg], NodeType.FULL_NODE) async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection): is_fully_compactified = await self.block_store.is_fully_compactified(request.header_hash) if is_fully_compactified is None or is_fully_compactified: return False header_block = await self.blockchain.get_header_block_by_height( request.height, request.header_hash, tx_filter=False ) if header_block is None: return None field_vdf = CompressibleVDFField(int(request.field_vdf)) if await self._needs_compact_proof(request.vdf_info, header_block, field_vdf): peer_request = full_node_protocol.RequestCompactVDF( request.height, request.header_hash, request.field_vdf, request.vdf_info ) response = await peer.request_compact_vdf(peer_request, timeout=10) if response is not None and isinstance(response, full_node_protocol.RespondCompactVDF): await self.respond_compact_vdf(response, peer) async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection): header_block = await self.blockchain.get_header_block_by_height( request.height, request.header_hash, tx_filter=False ) if header_block is None: return None vdf_proof: Optional[VDFProof] = None field_vdf = CompressibleVDFField(int(request.field_vdf)) if field_vdf == CompressibleVDFField.CC_EOS_VDF: for sub_slot in header_block.finished_sub_slots: if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == request.vdf_info: vdf_proof = sub_slot.proofs.challenge_chain_slot_proof break if field_vdf == 
CompressibleVDFField.ICC_EOS_VDF: for sub_slot in header_block.finished_sub_slots: if ( sub_slot.infused_challenge_chain is not None and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == request.vdf_info ): vdf_proof = sub_slot.proofs.infused_challenge_chain_slot_proof break if ( field_vdf == CompressibleVDFField.CC_SP_VDF and header_block.reward_chain_block.challenge_chain_sp_vdf == request.vdf_info ): vdf_proof = header_block.challenge_chain_sp_proof if ( field_vdf == CompressibleVDFField.CC_IP_VDF and header_block.reward_chain_block.challenge_chain_ip_vdf == request.vdf_info ): vdf_proof = header_block.challenge_chain_ip_proof if vdf_proof is None or vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity: self.log.error(f"{peer} requested compact vdf we don't have, height: {request.height}.") return None compact_vdf = full_node_protocol.RespondCompactVDF( request.height, request.header_hash, request.field_vdf, request.vdf_info, vdf_proof, ) msg = make_msg(ProtocolMessageTypes.respond_compact_vdf, compact_vdf) await peer.send_message(msg) async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection): field_vdf = CompressibleVDFField(int(request.field_vdf)) if not await self._can_accept_compact_proof( request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf ): return None async with self.blockchain.compact_proof_lock: if self.blockchain.seen_compact_proofs(request.vdf_info, request.height): return None replaced = await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf) if not replaced: self.log.error(f"Could not replace compact proof: {request.height}") return None msg = make_msg( ProtocolMessageTypes.new_compact_vdf, full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info), ) if self.server is not None: await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id) async def broadcast_uncompact_blocks( self, uncompact_interval_scan: int, target_uncompact_proofs: int, sanitize_weight_proof_only: bool ): try: while not self._shut_down: while self.sync_store.get_sync_mode() or self.sync_store.get_long_sync(): if self._shut_down: return None await asyncio.sleep(30) broadcast_list: List[timelord_protocol.RequestCompactProofOfTime] = [] self.log.info("Getting random heights for bluebox to compact") heights = await self.block_store.get_random_not_compactified(target_uncompact_proofs) self.log.info("Heights found for bluebox to compact: [%s]" % ", ".join(map(str, heights))) for h in heights: headers = await self.blockchain.get_header_blocks_in_range(h, h, tx_filter=False) records: Dict[bytes32, BlockRecord] = {} if sanitize_weight_proof_only: records = await self.blockchain.get_block_records_in_range(h, h) for header in headers.values(): expected_header_hash = self.blockchain.height_to_hash(header.height) if header.header_hash != expected_header_hash: continue if sanitize_weight_proof_only: assert header.header_hash in records record = records[header.header_hash] for sub_slot in header.finished_sub_slots: if ( sub_slot.proofs.challenge_chain_slot_proof.witness_type > 0 or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity ): broadcast_list.append( timelord_protocol.RequestCompactProofOfTime( sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf, header.header_hash, header.height, uint8(CompressibleVDFField.CC_EOS_VDF), ) ) if 
sub_slot.proofs.infused_challenge_chain_slot_proof is not None and ( sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type > 0 or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity ): assert sub_slot.infused_challenge_chain is not None broadcast_list.append( timelord_protocol.RequestCompactProofOfTime( sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf, header.header_hash, header.height, uint8(CompressibleVDFField.ICC_EOS_VDF), ) ) # Running in 'sanitize_weight_proof_only' ignores CC_SP_VDF and CC_IP_VDF # unless this is a challenge block. if sanitize_weight_proof_only: if not record.is_challenge_block(self.constants): continue if header.challenge_chain_sp_proof is not None and ( header.challenge_chain_sp_proof.witness_type > 0 or not header.challenge_chain_sp_proof.normalized_to_identity ): assert header.reward_chain_block.challenge_chain_sp_vdf is not None broadcast_list.append( timelord_protocol.RequestCompactProofOfTime( header.reward_chain_block.challenge_chain_sp_vdf, header.header_hash, header.height, uint8(CompressibleVDFField.CC_SP_VDF), ) ) if ( header.challenge_chain_ip_proof.witness_type > 0 or not header.challenge_chain_ip_proof.normalized_to_identity ): broadcast_list.append( timelord_protocol.RequestCompactProofOfTime( header.reward_chain_block.challenge_chain_ip_vdf, header.header_hash, header.height, uint8(CompressibleVDFField.CC_IP_VDF), ) ) if len(broadcast_list) > target_uncompact_proofs: broadcast_list = broadcast_list[:target_uncompact_proofs] if self.sync_store.get_sync_mode() or self.sync_store.get_long_sync(): continue if self.server is not None: self.log.info(f"Broadcasting {len(broadcast_list)} items to the bluebox") msgs = [] for new_pot in broadcast_list: msg = make_msg(ProtocolMessageTypes.request_compact_proof_of_time, new_pot) msgs.append(msg) await self.server.send_to_all(msgs, NodeType.TIMELORD) await asyncio.sleep(uncompact_interval_scan) except Exception as e: error_stack = traceback.format_exc() self.log.error(f"Exception in broadcast_uncompact_blocks: {e}") self.log.error(f"Exception Stack: {error_stack}") async def node_next_block_check( peer: ws.WSChiaConnection, potential_peek: uint32, blockchain: BlockchainInterface ) -> bool: block_response: Optional[Any] = await peer.request_block(full_node_protocol.RequestBlock(potential_peek, True)) if block_response is not None and isinstance(block_response, full_node_protocol.RespondBlock): peak = blockchain.get_peak() if peak is not None and block_response.block.prev_header_hash == peak.header_hash: return True return False
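

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the node): the compactness predicate that
# the handlers above repeat inline as
# `proof.witness_type == 0 and proof.normalized_to_identity`.
# `_ProofLike` is a hypothetical stand-in for the real VDFProof type.
from dataclasses import dataclass


@dataclass
class _ProofLike:
    witness_type: int
    normalized_to_identity: bool


def _is_compact_proof(proof: _ProofLike) -> bool:
    # A VDF proof is compact when its witness type is zero and it has been
    # normalized to the identity; non-compact proofs are the ones a bluebox
    # timelord is asked to recompute via RequestCompactProofOfTime.
    return proof.witness_type == 0 and proof.normalized_to_identity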
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from tests import utils


class ArgMinModule(torch.nn.Module):
    def __init__(self, dim=None, keepDims=True):
        super(ArgMinModule, self).__init__()
        self.dim = dim
        self.keepDims = keepDims

    def forward(self, tensor):
        # Check against None explicitly: dim=0 is a valid axis and must not
        # fall through to the global-argmin path.
        if self.dim is not None:
            return torch.argmin(tensor, self.dim, self.keepDims)
        else:
            return torch.argmin(tensor)


class ArgMaxModule(torch.nn.Module):
    def __init__(self, dim=None, keepDims=True):
        super(ArgMaxModule, self).__init__()
        self.dim = dim
        self.keepDims = keepDims

    def forward(self, tensor):
        # Same None check as above: dim=0 is falsy but still a valid axis.
        if self.dim is not None:
            return torch.argmax(tensor, self.dim, self.keepDims)
        else:
            return torch.argmax(tensor)


class TestArgMin(utils.TorchGlowTestCase):
    @utils.deterministic_expand(
        [
            lambda: ("basic", ArgMinModule(), torch.randn(4)),
            lambda: ("dimensions1", ArgMinModule(1, False), torch.randn(4, 4)),
            lambda: ("dimensions2", ArgMinModule(1), torch.randn(5, 5)),
        ]
    )
    def test_argmin_node(self, _, module, tensor):
        """Test of the PyTorch ArgMin node on Glow."""
        utils.run_comparison_tests(module, tensor, fusible_ops={"aten::argmin"})


class TestArgMax(utils.TorchGlowTestCase):
    @utils.deterministic_expand(
        [
            lambda: ("basic", ArgMaxModule(), torch.randn(4)),
            lambda: ("dimensions1", ArgMaxModule(1, False), torch.randn(4, 4)),
            lambda: ("dimensions2", ArgMaxModule(1), torch.randn(5, 5)),
        ]
    )
    def test_argmax_node(self, _, module, tensor):
        """Test of the PyTorch ArgMax node on Glow."""
        utils.run_comparison_tests(module, tensor, fusible_ops={"aten::argmax"})
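

# Hedged, runnable illustration (plain PyTorch, no Glow involved) of the
# keepDims behaviour the parameterized cases above exercise: torch.argmin and
# torch.argmax take (input, dim, keepdim), and keepdim=True retains the
# reduced dimension with size 1.
if __name__ == "__main__":
    t = torch.randn(4, 4)
    print(torch.argmin(t, 1, True).shape)   # torch.Size([4, 1]) -- dim kept
    print(torch.argmin(t, 1, False).shape)  # torch.Size([4])    -- dim squeezed
    print(torch.argmax(t).shape)            # torch.Size([])     -- global argmax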
module.exports = { prefix: 'fab', iconName: 'flipboard', icon: [448, 512, [], "f44d", "M0 32v448h448V32H0zm358.4 179.2h-89.6v89.6h-89.6v89.6H89.6V121.6h268.8v89.6z"] };
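// Note: the `icon` tuple above follows the standard Font Awesome icon
// definition layout: [width, height, ligatures, unicodeCodepoint, svgPathData].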
#
# Copyright (c) 2021 The Markovflow Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module containing the integration tests for the `SparsePowerExpectationPropagation` class."""
import numpy as np
import pytest
import tensorflow as tf
from gpflow.likelihoods import Gaussian

from markovflow.kernels import Matern12
from markovflow.likelihoods import PEPGaussian, PEPScalarLikelihood
from markovflow.models import (
    GaussianProcessRegression,
    SparseCVIGaussianProcess,
    SparsePowerExpectationPropagation,
)
from tests.tools.generate_random_objects import generate_random_time_observations

OUT_DIM = 1
LENGTH_SCALE = 2.0
VARIANCE = 2.25
NUM_DATA = 2

batch_shape = ()
output_dim = 1


@pytest.fixture(name="spep_gpr_optim_setup")
def _spep_gpr_optim_setup():
    """
    Creates a GPR model and a matched Sparse PEP model (z=x),
    and optimizes the latter (single step).
    """
    time_points, observations, kernel, variance = _setup()

    chol_obs_covariance = tf.eye(output_dim, dtype=tf.float64) * tf.sqrt(variance)
    input_data = (time_points, observations)
    inducing_points = time_points + 1e-10

    gpr = GaussianProcessRegression(
        kernel=kernel,
        input_data=input_data,
        chol_obs_covariance=chol_obs_covariance,
        mean_function=None,
    )

    likelihood = Gaussian(variance=variance)

    sep = SparsePowerExpectationPropagation(
        kernel=kernel,
        inducing_points=inducing_points,
        likelihood=PEPScalarLikelihood(likelihood),
        learning_rate=0.1,
        alpha=1.0,
    )

    scvi = SparseCVIGaussianProcess(
        kernel=kernel,
        inducing_points=inducing_points,
        likelihood=likelihood,
        learning_rate=1.0,
    )

    # do not train any hyper-parameters for these tests
    for t in likelihood.trainable_variables + kernel.trainable_variables:
        t._trainable = False

    # update sites -> optimal
    scvi.update_sites(input_data)

    sep.nat1.assign(scvi.nat1.numpy())
    sep.nat2.assign(scvi.nat2.numpy())

    return sep, gpr, input_data


def _setup():
    """Data, kernel and likelihood setup."""
    time_points, observations = generate_random_time_observations(
        obs_dim=output_dim, num_data=NUM_DATA, batch_shape=batch_shape
    )
    time_points = tf.constant(time_points)
    observations = tf.constant(observations)

    kernel = Matern12(lengthscale=LENGTH_SCALE, variance=VARIANCE, output_dim=output_dim)

    observation_noise = 1.0
    variance = tf.constant(observation_noise, dtype=tf.float64)

    return time_points, observations, kernel, variance


def test_optimal_sites(with_tf_random_seed, spep_gpr_optim_setup):
    """Test that the optimal values of the exact sites match the true sites."""
    spep, gpr, data = spep_gpr_optim_setup
    spep.learning_rate = 1.0
    spep.alpha = 1.0
    spep.update_sites(data)

    sd = spep.kernel.state_dim

    # for z = x, the sites are 2 sd x 2 sd but half empty;
    # one part must match the GPR site
    spep_nat1 = spep.nat1.numpy()[:-1, sd:]
    spep_nat2 = spep.nat2.numpy()[:-1, sd:, sd:]
    spep_log_norm = spep.log_norm.numpy()[:-1]
    spep_energy = spep.energy(data).numpy()

    # manually compute the optimal sites
    s2 = gpr._chol_obs_covariance.numpy() ** 2
    gpr_nat1 = gpr.observations / s2
    gpr_nat2 = -0.5 / s2 * np.ones_like(spep_nat2)
    gpr_log_norm = -0.5 * gpr.observations.numpy() ** 2 / s2 - 0.5 * np.log(2.0 * np.pi * s2)
    gpr_llh = gpr.log_likelihood().numpy()

    np.testing.assert_array_almost_equal(spep_nat1, gpr_nat1, decimal=3)
    np.testing.assert_array_almost_equal(spep_nat2, gpr_nat2, decimal=3)
    np.testing.assert_array_almost_equal(gpr_log_norm, spep_log_norm, decimal=4)
    np.testing.assert_array_almost_equal(gpr_llh, spep_energy, decimal=4)


def test_log_norm(with_tf_random_seed, spep_gpr_optim_setup):
    """Test that the computed site log-normalizers match the exact Gaussian values."""
    # the fixture has already set the sites to their optimal values
    spep, gpr, data = spep_gpr_optim_setup
    a = 1.0
    spep.alpha = a

    spep_log_norm = spep.compute_log_norm(data).numpy()[:-1]

    s2 = gpr._chol_obs_covariance.numpy() ** 2
    gpr_log_norm = -0.5 * gpr.observations.numpy() ** 2 / s2 - 0.5 * np.log(2.0 * np.pi * s2)

    np.testing.assert_array_almost_equal(gpr_log_norm, spep_log_norm, decimal=4)


def test_convergence_of_spep(with_tf_random_seed, spep_gpr_optim_setup):
    """Test that the optimal sites are fixed points of the update."""
    spep, gpr, input_data = spep_gpr_optim_setup

    # run EP site optimization
    for _ in range(20):
        spep.update_sites(input_data)

    # run one last step of EP
    old_nat1 = spep.nat1.numpy()
    old_nat2 = spep.nat2.numpy()

    spep.update_sites(input_data)

    new_nat1 = spep.nat1.numpy()
    new_nat2 = spep.nat2.numpy()

    np.testing.assert_array_almost_equal(new_nat1, old_nat1)
    np.testing.assert_array_almost_equal(new_nat2, old_nat2)

    np.testing.assert_array_almost_equal(
        gpr.log_likelihood().numpy(), spep.energy(input_data=input_data)
    )
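

# Hedged reference sketch (plain numpy, independent of markovflow): the
# closed-form optimal Gaussian site the assertions above compare against.
# For a Gaussian likelihood N(y | f, s2) the exact site, in natural
# parameters, is nat1 = y / s2 and nat2 = -1 / (2 * s2), with log-normalizer
# -y**2 / (2 * s2) - 0.5 * log(2 * pi * s2), as computed manually in
# test_optimal_sites above.
def _optimal_gaussian_site(y, s2):
    nat1 = y / s2
    nat2 = -0.5 / s2
    log_norm = -0.5 * y ** 2 / s2 - 0.5 * np.log(2.0 * np.pi * s2)
    return nat1, nat2, log_norm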
__NUXT_JSONP__("/amp/37/3", (function(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,_,$,aa,ab,ac,ad,ae,af,ag,ah,ai,aj,ak,al,am,an,ao,ap){return {data:[{metaTitle:A,metaDesc:B,verseId:C,surahId:37,currentSurah:{number:"37",name:"الصّٰۤفّٰت",name_latin:"As-Saffat",number_of_ayah:"182",text:{"1":"وَالصّٰۤفّٰتِ صَفًّاۙ","2":"فَالزّٰجِرٰتِ زَجْرًاۙ","3":"فَالتّٰلِيٰتِ ذِكْرًاۙ","4":"اِنَّ اِلٰهَكُمْ لَوَاحِدٌۗ ","5":"رَبُّ السَّمٰوٰتِ وَالْاَرْضِ وَمَا بَيْنَهُمَا وَرَبُّ الْمَشَارِقِۗ ","6":"اِنَّا زَيَّنَّا السَّمَاۤءَ الدُّنْيَا بِزِيْنَةِ ِۨالْكَوَاكِبِۙ","7":"وَحِفْظًا مِّنْ كُلِّ شَيْطٰنٍ مَّارِدٍۚ ","8":"لَا يَسَّمَّعُوْنَ اِلَى الْمَلَاِ الْاَعْلٰى وَيُقْذَفُوْنَ مِنْ كُلِّ جَانِبٍۖ ","9":"دُحُوْرًا وَّلَهُمْ عَذَابٌ وَّاصِبٌ ","10":"اِلَّا مَنْ خَطِفَ الْخَطْفَةَ فَاَتْبَعَهٗ شِهَابٌ ثَاقِبٌ ","11":"فَاسْتَفْتِهِمْ اَهُمْ اَشَدُّ خَلْقًا اَمْ مَّنْ خَلَقْنَا ۗاِنَّا خَلَقْنٰهُمْ مِّنْ طِيْنٍ لَّازِبٍ ","12":"بَلْ عَجِبْتَ وَيَسْخَرُوْنَ ۖ ","13":"وَاِذَا ذُكِّرُوْا لَا يَذْكُرُوْنَ ۖ ","14":"وَاِذَا رَاَوْا اٰيَةً يَّسْتَسْخِرُوْنَۖ ","15":"وَقَالُوْٓا اِنْ هٰذَآ اِلَّا سِحْرٌ مُّبِيْنٌ ۚ ","16":"ءَاِذَا مِتْنَا وَكُنَّا تُرَابًا وَّعِظَامًا ءَاِنَّا لَمَبْعُوْثُوْنَۙ","17":"اَوَاٰبَاۤؤُنَا الْاَوَّلُوْنَۗ ","18":"قُلْ نَعَمْ وَاَنْتُمْ دَاخِرُوْنَۚ ","19":"فَاِنَّمَا هِيَ زَجْرَةٌ وَّاحِدَةٌ فَاِذَا هُمْ يَنْظُرُوْنَ ","20":"وَقَالُوْا يٰوَيْلَنَا هٰذَا يَوْمُ الدِّيْنِ ","21":"هٰذَا يَوْمُ الْفَصْلِ الَّذِيْ كُنْتُمْ بِهٖ تُكَذِّبُوْنَ ࣖ","22":"اُحْشُرُوا الَّذِيْنَ ظَلَمُوْا وَاَزْوَاجَهُمْ وَمَا كَانُوْا يَعْبُدُوْنَ ۙ","23":"مِنْ دُوْنِ اللّٰهِ فَاهْدُوْهُمْ اِلٰى صِرَاطِ الْجَحِيْمِ ","24":"وَقِفُوْهُمْ اِنَّهُمْ مَّسْـُٔوْلُوْنَ ۙ","25":"مَا لَكُمْ لَا تَنَاصَرُوْنَ ","26":"بَلْ هُمُ الْيَوْمَ مُسْتَسْلِمُوْنَ","27":"وَاَقْبَلَ بَعْضُهُمْ عَلٰى بَعْضٍ يَّتَسَاۤءَلُوْنَ ","28":"قَالُوْٓا اِنَّكُمْ كُنْتُمْ تَأْتُوْنَنَا عَنِ الْيَمِيْنِ ","29":"قَالُوْا بَلْ لَّمْ تَكُوْنُوْا مُؤْمِنِيْنَۚ ","30":"وَمَا كَانَ لَنَا عَلَيْكُمْ مِّنْ سُلْطٰنٍۚ بَلْ كُنْتُمْ قَوْمًا طٰغِيْنَ ","31":"فَحَقَّ عَلَيْنَا قَوْلُ رَبِّنَآ ۖاِنَّا لَذَاۤىِٕقُوْنَ ","32":"فَاَغْوَيْنٰكُمْ اِنَّا كُنَّا غٰوِيْنَ ","33":"فَاِنَّهُمْ يَوْمَىِٕذٍ فِى الْعَذَابِ مُشْتَرِكُوْنَ","34":"اِنَّا كَذٰلِكَ نَفْعَلُ بِالْمُجْرِمِيْنَ","35":"اِنَّهُمْ كَانُوْٓا اِذَا قِيْلَ لَهُمْ لَآ اِلٰهَ اِلَّا اللّٰهُ يَسْتَكْبِرُوْنَ ۙ","36":"وَيَقُوْلُوْنَ اَىِٕنَّا لَتَارِكُوْٓا اٰلِهَتِنَا لِشَاعِرٍ مَّجْنُوْنٍ ۗ ","37":"بَلْ جَاۤءَ بِالْحَقِّ وَصَدَّقَ الْمُرْسَلِيْنَ","38":"اِنَّكُمْ لَذَاۤىِٕقُوا الْعَذَابِ الْاَلِيْمِ ۚ ","39":"وَمَا تُجْزَوْنَ اِلَّا مَا كُنْتُمْ تَعْمَلُوْنَۙ","40":"اِلَّا عِبَادَ اللّٰهِ الْمُخْلَصِيْنَ ","41":"اُولٰۤىِٕكَ لَهُمْ رِزْقٌ مَّعْلُوْمٌۙ","42":"فَوَاكِهُ ۚوَهُمْ مُّكْرَمُوْنَۙ","43":"فِيْ جَنّٰتِ النَّعِيْمِۙ","44":"عَلٰى سُرُرٍ مُّتَقٰبِلِيْنَ","45":"يُطَافُ عَلَيْهِمْ بِكَأْسٍ مِّنْ مَّعِيْنٍۢ ۙ","46":"بَيْضَاۤءَ لَذَّةٍ لِّلشّٰرِبِيْنَۚ ","47":"لَا فِيْهَا غَوْلٌ وَّلَا هُمْ عَنْهَا يُنْزَفُوْنَ","48":"وَعِنْدَهُمْ قٰصِرٰتُ الطَّرْفِ عِيْنٌ ۙ","49":"كَاَنَّهُنَّ بَيْضٌ مَّكْنُوْنٌ","50":"فَاَقْبَلَ بَعْضُهُمْ عَلٰى بَعْضٍ يَّتَسَاۤءَلُوْنَ","51":"قَالَ قَاۤىِٕلٌ مِّنْهُمْ اِنِّيْ كَانَ لِيْ قَرِيْنٌۙ","52":"يَّقُوْلُ اَىِٕنَّكَ لَمِنَ الْمُصَدِّقِيْنَ","53":"ءَاِذَا مِتْنَا وَكُنَّا تُرَابًا وَّعِظَامًا ءَاِنَّا لَمَدِيْنُوْنَ","54":"قَالَ هَلْ اَنْتُمْ مُّطَّلِعُوْنَ","55":"فَاطَّلَعَ فَرَاٰهُ فِيْ سَوَاۤءِ الْجَحِيْمِ","56":"قَالَ تَاللّٰهِ اِنْ كِدْتَّ لَتُرْدِيْنِ ۙ","57":"وَلَوْلَا نِعْمَةُ رَبِّيْ لَكُنْتُ مِنَ 
الْمُحْضَرِيْنَ","58":"اَفَمَا نَحْنُ بِمَيِّتِيْنَۙ","59":"اِلَّا مَوْتَتَنَا الْاُوْلٰى وَمَا نَحْنُ بِمُعَذَّبِيْنَ","60":"اِنَّ هٰذَا لَهُوَ الْفَوْزُ الْعَظِيْمُ","61":"لِمِثْلِ هٰذَا فَلْيَعْمَلِ الْعٰمِلُوْنَ","62":"اَذٰلِكَ خَيْرٌ نُّزُلًا اَمْ شَجَرَةُ الزَّقُّوْمِ","63":"اِنَّا جَعَلْنٰهَا فِتْنَةً لِّلظّٰلِمِيْنَ","64":"اِنَّهَا شَجَرَةٌ تَخْرُجُ فِيْٓ اَصْلِ الْجَحِيْمِۙ","65":"طَلْعُهَا كَاَنَّهٗ رُءُوْسُ الشَّيٰطِيْنِ","66":"فَاِنَّهُمْ لَاٰكِلُوْنَ مِنْهَا فَمَالِـُٔوْنَ مِنْهَا الْبُطُوْنَۗ ","67":"ثُمَّ اِنَّ لَهُمْ عَلَيْهَا لَشَوْبًا مِّنْ حَمِيْمٍۚ ","68":"ثُمَّ اِنَّ مَرْجِعَهُمْ لَاِلَى الْجَحِيْمِ","69":"اِنَّهُمْ اَلْفَوْا اٰبَاۤءَهُمْ ضَاۤلِّيْنَۙ","70":"فَهُمْ عَلٰٓى اٰثٰرِهِمْ يُهْرَعُوْنَ","71":"وَلَقَدْ ضَلَّ قَبْلَهُمْ اَكْثَرُ الْاَوَّلِيْنَۙ","72":"وَلَقَدْ اَرْسَلْنَا فِيْهِمْ مُّنْذِرِيْنَ","73":"فَانْظُرْ كَيْفَ كَانَ عَاقِبَةُ الْمُنْذَرِيْنَۙ","74":"اِلَّا عِبَادَ اللّٰهِ الْمُخْلَصِيْنَ ࣖ ","75":"وَلَقَدْ نَادٰىنَا نُوْحٌ فَلَنِعْمَ الْمُجِيْبُوْنَۖ ","76":"وَنَجَّيْنٰهُ وَاَهْلَهٗ مِنَ الْكَرْبِ الْعَظِيْمِۖ ","77":"وَجَعَلْنَا ذُرِّيَّتَهٗ هُمُ الْبٰقِيْنَ","78":D,"79":"سَلٰمٌ عَلٰى نُوْحٍ فِى الْعٰلَمِيْنَ","80":g,"81":h,"82":"ثُمَّ اَغْرَقْنَا الْاٰخَرِيْنَ","83":"وَاِنَّ مِنْ شِيْعَتِهٖ لَاِبْرٰهِيْمَ ۘ ","84":"اِذْ جَاۤءَ رَبَّهٗ بِقَلْبٍ سَلِيْمٍۙ","85":"اِذْ قَالَ لِاَبِيْهِ وَقَوْمِهٖ مَاذَا تَعْبُدُوْنَ ۚ ","86":"اَىِٕفْكًا اٰلِهَةً دُوْنَ اللّٰهِ تُرِيْدُوْنَۗ ","87":"فَمَا ظَنُّكُمْ بِرَبِّ الْعٰلَمِيْنَ","88":"فَنَظَرَ نَظْرَةً فِى النُّجُوْمِۙ","89":"فَقَالَ اِنِّيْ سَقِيْمٌ","90":"فَتَوَلَّوْا عَنْهُ مُدْبِرِيْنَ","91":"فَرَاغَ اِلٰٓى اٰلِهَتِهِمْ فَقَالَ اَلَا تَأْكُلُوْنَۚ ","92":"مَا لَكُمْ لَا تَنْطِقُوْنَ","93":"فَرَاغَ عَلَيْهِمْ ضَرْبًا ۢبِالْيَمِيْنِ","94":"فَاَقْبَلُوْٓا اِلَيْهِ يَزِفُّوْنَ","95":"قَالَ اَتَعْبُدُوْنَ مَا تَنْحِتُوْنَۙ","96":"وَاللّٰهُ خَلَقَكُمْ وَمَا تَعْمَلُوْنَ","97":"قَالُوا ابْنُوْا لَهٗ بُنْيَانًا فَاَلْقُوْهُ فِى الْجَحِيْمِ","98":"فَاَرَادُوْا بِهٖ كَيْدًا فَجَعَلْنٰهُمُ الْاَسْفَلِيْنَ","99":"وَقَالَ اِنِّيْ ذَاهِبٌ اِلٰى رَبِّيْ سَيَهْدِيْنِ","100":"رَبِّ هَبْ لِيْ مِنَ الصّٰلِحِيْنَ","101":"فَبَشَّرْنٰهُ بِغُلٰمٍ حَلِيْمٍ","102":"فَلَمَّا بَلَغَ مَعَهُ السَّعْيَ قَالَ يٰبُنَيَّ اِنِّيْٓ اَرٰى فِى الْمَنَامِ اَنِّيْٓ اَذْبَحُكَ فَانْظُرْ مَاذَا تَرٰىۗ قَالَ يٰٓاَبَتِ افْعَلْ مَا تُؤْمَرُۖ سَتَجِدُنِيْٓ اِنْ شَاۤءَ اللّٰهُ مِنَ الصّٰبِرِيْنَ","103":"فَلَمَّآ اَسْلَمَا وَتَلَّهٗ لِلْجَبِيْنِۚ ","104":"وَنَادَيْنٰهُ اَنْ يّٰٓاِبْرٰهِيْمُ ۙ","105":"قَدْ صَدَّقْتَ الرُّؤْيَا ۚاِنَّا كَذٰلِكَ نَجْزِى الْمُحْسِنِيْنَ","106":"اِنَّ هٰذَا لَهُوَ الْبَلٰۤؤُا الْمُبِيْنُ","107":"وَفَدَيْنٰهُ بِذِبْحٍ عَظِيْمٍ","108":D,"109":"سَلٰمٌ عَلٰٓى اِبْرٰهِيْمَ","110":"كَذٰلِكَ نَجْزِى الْمُحْسِنِيْنَ","111":h,"112":"وَبَشَّرْنٰهُ بِاِسْحٰقَ نَبِيًّا مِّنَ الصّٰلِحِيْنَ","113":"وَبٰرَكْنَا عَلَيْهِ وَعَلٰٓى اِسْحٰقَۗ وَمِنْ ذُرِّيَّتِهِمَا مُحْسِنٌ وَّظَالِمٌ لِّنَفْسِهٖ مُبِيْنٌ ࣖ","114":"وَلَقَدْ مَنَنَّا عَلٰى مُوْسٰى وَهٰرُوْنَ ۚ ","115":"وَنَجَّيْنٰهُمَا وَقَوْمَهُمَا مِنَ الْكَرْبِ الْعَظِيْمِۚ ","116":"وَنَصَرْنٰهُمْ فَكَانُوْا هُمُ الْغٰلِبِيْنَۚ ","117":"وَاٰتَيْنٰهُمَا الْكِتٰبَ الْمُسْتَبِيْنَ ۚ ","118":"وَهَدَيْنٰهُمَا الصِّرَاطَ الْمُسْتَقِيْمَۚ ","119":"وَتَرَكْنَا عَلَيْهِمَا فِى الْاٰخِرِيْنَ ۖ ","120":"سَلٰمٌ عَلٰى مُوْسٰى وَهٰرُوْنَ","121":g,"122":"اِنَّهُمَا مِنْ عِبَادِنَا الْمُؤْمِنِيْنَ","123":"وَاِنَّ اِلْيَاسَ لَمِنَ الْمُرْسَلِيْنَۗ ","124":"اِذْ قَالَ لِقَوْمِهٖٓ اَلَا تَتَّقُوْنَ","125":"اَتَدْعُوْنَ بَعْلًا وَّتَذَرُوْنَ اَحْسَنَ الْخَالِقِيْنَۙ","126":"اللّٰهَ رَبَّكُمْ وَرَبَّ اٰبَاۤىِٕكُمُ 
الْاَوَّلِيْنَ","127":"فَكَذَّبُوْهُ فَاِنَّهُمْ لَمُحْضَرُوْنَۙ","128":E,"129":"وَتَرَكْنَا عَلَيْهِ فِى الْاٰخِرِيْنَ ۙ","130":"سَلٰمٌ عَلٰٓى اِلْ يَاسِيْنَ","131":g,"132":h,"133":"وَاِنَّ لُوْطًا لَّمِنَ الْمُرْسَلِيْنَۗ ","134":"اِذْ نَجَّيْنٰهُ وَاَهْلَهٗٓ اَجْمَعِيْۙنَ","135":"اِلَّا عَجُوْزًا فِى الْغٰبِرِيْنَ","136":"ثُمَّ دَمَّرْنَا الْاٰخَرِيْنَ","137":"وَاِنَّكُمْ لَتَمُرُّوْنَ عَلَيْهِمْ مُّصْبِحِيْنَۙ","138":"وَبِالَّيْلِۗ اَفَلَا تَعْقِلُوْنَ ࣖ ","139":"وَاِنَّ يُوْنُسَ لَمِنَ الْمُرْسَلِيْنَۗ ","140":"اِذْ اَبَقَ اِلَى الْفُلْكِ الْمَشْحُوْنِۙ","141":"فَسَاهَمَ فَكَانَ مِنَ الْمُدْحَضِيْنَۚ","142":"فَالْتَقَمَهُ الْحُوْتُ وَهُوَ مُلِيْمٌ ","143":"فَلَوْلَآ اَنَّهٗ كَانَ مِنَ الْمُسَبِّحِيْنَ ۙ","144":"لَلَبِثَ فِيْ بَطْنِهٖٓ اِلٰى يَوْمِ يُبْعَثُوْنَۚ ","145":"فَنَبَذْنٰهُ بِالْعَرَاۤءِ وَهُوَ سَقِيْمٌ ۚ ","146":"وَاَنْۢبَتْنَا عَلَيْهِ شَجَرَةً مِّنْ يَّقْطِيْنٍۚ ","147":"وَاَرْسَلْنٰهُ اِلٰى مِائَةِ اَلْفٍ اَوْ يَزِيْدُوْنَۚ ","148":"فَاٰمَنُوْا فَمَتَّعْنٰهُمْ اِلٰى حِيْنٍ","149":"فَاسْتَفْتِهِمْ اَلِرَبِّكَ الْبَنَاتُ وَلَهُمُ الْبَنُوْنَۚ ","150":"اَمْ خَلَقْنَا الْمَلٰۤىِٕكَةَ اِنَاثًا وَّهُمْ شَاهِدُوْنَ","151":"اَلَآ اِنَّهُمْ مِّنْ اِفْكِهِمْ لَيَقُوْلُوْنَۙ","152":"وَلَدَ اللّٰهُ ۙوَاِنَّهُمْ لَكٰذِبُوْنَۙ","153":"اَصْطَفَى الْبَنَاتِ عَلَى الْبَنِيْنَۗ ","154":"مَا لَكُمْۗ كَيْفَ تَحْكُمُوْنَ","155":"اَفَلَا تَذَكَّرُوْنَۚ ","156":"اَمْ لَكُمْ سُلْطٰنٌ مُّبِيْنٌۙ","157":"فَأْتُوْا بِكِتٰبِكُمْ اِنْ كُنْتُمْ صٰدِقِيْنَ","158":"وَجَعَلُوْا بَيْنَهٗ وَبَيْنَ الْجِنَّةِ نَسَبًا ۗوَلَقَدْ عَلِمَتِ الْجِنَّةُ اِنَّهُمْ لَمُحْضَرُوْنَۙ","159":"سُبْحٰنَ اللّٰهِ عَمَّا يَصِفُوْنَۙ","160":E,"161":"فَاِنَّكُمْ وَمَا تَعْبُدُوْنَۙ","162":"مَآ اَنْتُمْ عَلَيْهِ بِفَاتِنِيْنَۙ","163":"اِلَّا مَنْ هُوَ صَالِ الْجَحِيْمِ","164":"وَمَا مِنَّآ اِلَّا لَهٗ مَقَامٌ مَّعْلُوْمٌۙ","165":"وَاِنَّا لَنَحْنُ الصَّۤافُّوْنَۖ","166":"وَاِنَّا لَنَحْنُ الْمُسَبِّحُوْنَ","167":"وَاِنْ كَانُوْا لَيَقُوْلُوْنَۙ","168":"لَوْ اَنَّ عِنْدَنَا ذِكْرًا مِّنَ الْاَوَّلِيْنَۙ","169":"لَكُنَّا عِبَادَ اللّٰهِ الْمُخْلَصِيْنَ","170":"فَكَفَرُوْا بِهٖۚ فَسَوْفَ يَعْلَمُوْنَ","171":"وَلَقَدْ سَبَقَتْ كَلِمَتُنَا لِعِبَادِنَا الْمُرْسَلِيْنَ ۖ","172":"اِنَّهُمْ لَهُمُ الْمَنْصُوْرُوْنَۖ","173":"وَاِنَّ جُنْدَنَا لَهُمُ الْغٰلِبُوْنَ","174":"فَتَوَلَّ عَنْهُمْ حَتّٰى حِيْنٍۙ","175":"وَّاَبْصِرْهُمْۗ فَسَوْفَ يُبْصِرُوْنَ","176":"اَفَبِعَذَابِنَا يَسْتَعْجِلُوْنَ","177":"فَاِذَا نَزَلَ بِسَاحَتِهِمْ فَسَاۤءَ صَبَاحُ الْمُنْذَرِيْنَ","178":"وَتَوَلَّ عَنْهُمْ حَتّٰى حِيْنٍۙ","179":"وَّاَبْصِرْۗ فَسَوْفَ يُبْصِرُوْنَ","180":"سُبْحٰنَ رَبِّكَ رَبِّ الْعِزَّةِ عَمَّا يَصِفُوْنَۚ","181":"وَسَلٰمٌ عَلَى الْمُرْسَلِيْنَۚ ","182":"وَالْحَمْدُ لِلّٰهِ رَبِّ الْعٰلَمِيْنَ ࣖ"},translations:{id:{name:"Barisan-Barisan",text:{"1":"Demi (rombongan malaikat) yang berbaris bersaf-saf,","2":"demi (rombongan) yang mencegah dengan sungguh-sungguh, ","3":"demi (rombongan) yang membacakan peringatan,","4":"sungguh, Tuhanmu benar-benar Esa.","5":"Tuhan langit dan bumi dan apa yang berada di antara keduanya dan Tuhan tempat-tempat terbitnya matahari.","6":"Sesungguhnya Kami telah menghias langit dunia (yang terdekat), dengan hiasan bintang-bintang.","7":"Dan (Kami) telah menjaganya dari setiap setan yang durhaka,","8":"mereka (setan-setan itu) tidak dapat mendengar (pembicaraan) para malaikat dan mereka dilempari dari segala penjuru,","9":"untuk mengusir mereka dan mereka akan mendapat azab yang kekal,","10":"kecuali (setan) yang mencuri (pembicaraan); maka ia dikejar oleh bintang yang menyala.","11":"Maka tanyakanlah kepada mereka 
(musyrik Mekah), “Apakah penciptaan mereka yang lebih sulit ataukah apa yang telah Kami ciptakan itu?” Sesungguhnya Kami telah menciptakan mereka dari tanah liat.","12":"Bahkan engkau (Muhammad) menjadi heran (terhadap keingkaran mereka) dan mereka menghinakan (engkau).","13":"Dan apabila mereka diberi peringatan, mereka tidak mengindahkannya.","14":"Dan apabila mereka melihat suatu tanda (kebesaran) Allah, mereka memperolok-olokkan.","15":"Dan mereka berkata, “Ini tidak lain hanyalah sihir yang nyata.","16":"Apabila kami telah mati dan telah menjadi tanah dan tulang-belulang, apakah benar kami akan dibangkitkan (kembali)?","17":"dan apakah nenek moyang kami yang telah terdahulu (akan dibangkitkan pula)?”","18":"Katakanlah (Muhammad), “Ya, dan kamu akan terhina.”","19":"Maka sesungguhnya kebangkitan itu hanya dengan satu teriakan saja; maka seketika itu mereka melihatnya.","20":"Dan mereka berkata, “Alangkah celaka kami! (Kiranya) inilah hari pembalasan itu.”","21":"Inilah hari keputusan yang dahulu kamu dustakan.","22":"(Diperintahkan kepada malaikat), “Kumpulkanlah orang-orang yang zalim beserta teman sejawat mereka dan apa yang dahulu mereka sembah,","23":"selain Allah, lalu tunjukkanlah kepada mereka jalan ke neraka.","24":"Tahanlah mereka (di tempat perhentian), sesungguhnya mereka akan ditanya,","25":"”Mengapa kamu tidak tolong-menolong?”","26":"Bahkan mereka pada hari itu menyerah (kepada keputusan Allah).","27":"Dan sebagian mereka menghadap kepada sebagian yang lain saling berbantah-bantahan.","28":"Sesungguhnya (pengikut-pengikut) mereka berkata (kepada pemimpin-pemimpin mereka), “Kamulah yang dahulu datang kepada kami dari kanan.”","29":"(Pemimpin-pemimpin) mereka menjawab, “(Tidak), bahkan kamulah yang tidak (mau) menjadi orang mukmin,","30":"sedangkan kami tidak berkuasa terhadapmu, bahkan kamu menjadi kaum yang melampaui batas.","31":"Maka pantas putusan (azab) Tuhan menimpa kita; pasti kita akan merasakan (azab itu).","32":"Maka kami telah menyesatkan kamu, sesungguhnya kami sendiri, orang-orang yang sesat.”","33":"Maka sesungguhnya mereka pada hari itu bersama-sama merasakan azab.","34":"Sungguh, demikianlah Kami memperlakukan terhadap orang-orang yang berbuat dosa.","35":"Sungguh, dahulu apabila dikatakan kepada mereka, “La ilaha illallah” (Tidak ada tuhan selain Allah), mereka menyombongkan diri,","36":"dan mereka berkata, “Apakah kami harus meninggalkan sesembahan kami karena seorang penyair gila?”","37":"Padahal dia (Muhammad) datang dengan membawa kebenaran dan membenarkan rasul-rasul (sebelumnya).","38":"Sungguh, kamu pasti akan merasakan azab yang pedih.","39":"Dan kamu tidak diberi balasan melainkan terhadap apa yang telah kamu kerjakan,","40":"tetapi hamba-hamba Allah yang dibersihkan (dari dosa),","41":"mereka itu memperoleh rezeki yang sudah ditentukan,","42":"(yaitu) buah-buahan. 
Dan mereka orang yang dimuliakan,","43":"di dalam surga-surga yang penuh kenikmatan,","44":"(mereka duduk) berhadap-hadapan di atas dipan-dipan.","45":"Kepada mereka diedarkan gelas (yang berisi air) dari mata air (surga),","46":"(warnanya) putih bersih, sedap rasanya bagi orang-orang yang minum.","47":"Tidak ada di dalamnya (unsur) yang memabukkan dan mereka tidak mabuk karenanya.","48":"Dan di sisi mereka ada (bidadari-bidadari) yang bermata indah, dan membatasi pandangannya,","49":"seakan-akan mereka adalah telur yang tersimpan dengan baik.","50":"Lalu mereka berhadap-hadapan satu sama lain sambil bercakap-cakap.","51":"Berkatalah salah seorang di antara mereka, “Sesungguhnya aku dahulu (di dunia) pernah mempunyai seorang teman,","52":"yang berkata, “Apakah sesungguhnya kamu termasuk orang-orang yang membenarkan (hari berbangkit)?","53":"Apabila kita telah mati dan telah menjadi tanah dan tulang-belulang, apakah kita benar-benar (akan dibangkitkan) untuk diberi pembalasan?”","54":"Dia berkata, “Maukah kamu meninjau (temanku itu)?”","55":"Maka dia meninjaunya, lalu dia melihat (teman)nya itu di tengah-tengah neraka yang menyala-nyala.","56":"Dia berkata, “Demi Allah, engkau hampir saja mencelakakanku,","57":"dan sekiranya bukan karena nikmat Tuhanku pastilah aku termasuk orang-orang yang diseret (ke neraka).”","58":"Maka apakah kita tidak akan mati?","59":"Kecuali kematian kita yang pertama saja (di dunia), dan kita tidak akan diazab (di akhirat ini)?”","60":"Sungguh, ini benar-benar kemenangan yang agung.","61":"Untuk (kemenangan) serupa ini, hendaklah beramal orang-orang yang mampu beramal.","62":"Apakah (makanan surga) itu hidangan yang lebih baik ataukah pohon zaqqum.","63":"Sungguh, Kami menjadikannya (pohon zaqqum itu) sebagai azab bagi orang-orang zalim.","64":"Sungguh, itu adalah pohon yang keluar dari dasar neraka Jahim,","65":"Mayangnya seperti kepala-kepala setan.","66":"Maka sungguh, mereka benar-benar memakan sebagian darinya (buah pohon itu), dan mereka memenuhi perutnya dengan buahnya (zaqqum).","67":"Kemudian sungguh, setelah makan (buah zaqqum) mereka mendapat minuman yang dicampur dengan air yang sangat panas.","68":"Kemudian pasti tempat kembali mereka ke neraka Jahim.","69":"Sesungguhnya mereka mendapati nenek moyang mereka dalam keadaan sesat, ","70":"lalu mereka tergesa-gesa mengikuti jejak (nenek moyang) mereka.","71":"Dan sungguh, sebelum mereka (Suku Quraisy), telah sesat sebagian besar dari orang-orang yang dahulu,","72":"dan sungguh, Kami telah mengutus (rasul) pemberi peringatan di kalangan mereka.","73":"Maka perhatikanlah bagaimana kesudahan orang-orang yang diberi peringatan itu,","74":F,"75":"Dan sungguh, Nuh telah berdoa kepada Kami, maka sungguh, Kamilah sebaik-baik yang memperkenankan doa.","76":"Kami telah menyelamatkan dia dan pengikutnya dari bencana yang besar.","77":"Dan Kami jadikan anak cucunya orang-orang yang melanjutkan keturunan.","78":"Dan Kami abadikan untuk Nuh (pujian) di kalangan orang-orang yang datang kemudian;","79":"”Kesejahteraan (Kami limpahkan) atas Nuh di seluruh alam.”","80":"Sungguh, demikianlah Kami memberi balasan kepada orang-orang yang berbuat baik.","81":"Sungguh, dia termasuk di antara hamba-hamba Kami yang beriman.","82":"Kemudian Kami tenggelamkan yang lain.","83":"Dan sungguh, Ibrahim termasuk golongannya (Nuh).","84":"(Ingatlah) ketika dia datang kepada Tuhannya dengan hati yang suci,","85":"(ingatlah) ketika dia berkata kepada ayahnya dan kaumnya, “Apakah yang kamu sembah itu?","86":"Apakah kamu menghendaki 
kebohongan dengan sesembahan selain Allah itu?","87":"Maka bagaimana anggapanmu terhadap Tuhan seluruh alam?”","88":"Lalu dia memandang sekilas ke bintang-bintang,","89":"kemudian dia (Ibrahim) berkata, “Sesungguhnya aku sakit.”","90":"Lalu mereka berpaling dari dia dan pergi meninggalkannya.","91":"Kemudian dia (Ibrahim) pergi dengan diam-diam kepada berhala-berhala mereka; lalu dia berkata, “Mengapa kamu tidak makan?","92":"Mengapa kamu tidak menjawab?”","93":"Lalu dihadapinya (berhala-berhala) itu sambil memukulnya dengan tangan kanannya.","94":"Kemudian mereka (kaumnya) datang bergegas kepadanya.","95":"Dia (Ibrahim) berkata, “Apakah kamu menyembah patung-patung yang kamu pahat itu?","96":"Padahal Allah-lah yang menciptakan kamu dan apa yang kamu perbuat itu.”","97":"Mereka berkata, “Buatlah bangunan (perapian) untuknya (membakar Ibrahim); lalu lemparkan dia ke dalam api yang menyala-nyala itu.”","98":"Maka mereka bermaksud memperdayainya dengan (membakar)nya, (namun Allah menyelamatkannya), lalu Kami jadikan mereka orang-orang yang hina.","99":"Dan dia (Ibrahim) berkata, “Sesungguhnya aku harus pergi (menghadap) kepada Tuhanku, Dia akan memberi petunjuk kepadaku.","100":"Ya Tuhanku, anugerahkanlah kepadaku (seorang anak) yang termasuk orang yang saleh.”","101":"Maka Kami beri kabar gembira kepadanya dengan (kelahiran) seorang anak yang sangat sabar (Ismail).","102":"Maka ketika anak itu sampai (pada umur) sanggup berusaha bersamanya, (Ibrahim) berkata, “Wahai anakku! Sesungguhnya aku bermimpi bahwa aku menyembelihmu. Maka pikirkanlah bagaimana pendapatmu!” Dia (Ismail) menjawab, “Wahai ayahku! Lakukanlah apa yang diperintahkan (Allah) kepadamu; insya Allah engkau akan mendapatiku termasuk orang yang sabar.”","103":"Maka ketika keduanya telah berserah diri dan dia (Ibrahim) membaringkan anaknya atas pelipis(nya), (untuk melaksanakan perintah Allah).","104":"Lalu Kami panggil dia, “Wahai Ibrahim!","105":"sungguh, engkau telah membenarkan mimpi itu.” Sungguh, demikianlah Kami memberi balasan kepada orang-orang yang berbuat baik.","106":"Sesungguhnya ini benar-benar suatu ujian yang nyata.","107":"Dan Kami tebus anak itu dengan seekor sembelihan yang besar.","108":"Dan Kami abadikan untuk Ibrahim (pujian) di kalangan orang-orang yang datang kemudian,","109":"”Selamat sejahtera bagi Ibrahim.”","110":i,"111":G,"112":"Dan Kami beri dia kabar gembira dengan (kelahiran) Ishak seorang nabi yang termasuk orang-orang yang saleh.","113":"Dan Kami limpahkan keberkahan kepadanya dan kepada Ishak. 
Dan di antara keturunan keduanya ada yang berbuat baik dan ada (pula) yang terang-terangan berbuat zalim terhadap dirinya sendiri.","114":"Dan sungguh, Kami telah melimpahkan nikmat kepada Musa dan Harun.","115":"Dan Kami selamatkan keduanya dan kaumnya dari bencana yang besar,","116":"dan Kami tolong mereka, sehingga jadilah mereka orang-orang yang menang.","117":"Dan Kami berikan kepada keduanya Kitab yang sangat jelas,","118":"dan Kami tunjukkan keduanya jalan yang lurus.","119":"Dan Kami abadikan untuk keduanya (pujian) di kalangan orang-orang yang datang kemudian,","120":"”Selamat sejahtera bagi Musa dan Harun.”","121":i,"122":"Sungguh, keduanya termasuk hamba-hamba Kami yang beriman.","123":"Dan sungguh, Ilyas benar-benar termasuk salah seorang rasul.","124":"(Ingatlah) ketika dia berkata kepada kaumnya, “Mengapa kamu tidak bertakwa?","125":"Patutkah kamu menyembah Ba’l dan kamu tinggalkan (Allah) sebaik-baik pencipta.","126":"(Yaitu) Allah Tuhanmu dan Tuhan nenek moyangmu yang terdahulu?”","127":"Tetapi mereka mendustakannya (Ilyas), maka sungguh, mereka akan diseret (ke neraka),","128":"kecuali hamba-hamba Allah yang disucikan (dari dosa),","129":"Dan Kami abadikan untuk Ilyas (pujian) di kalangan orang-orang yang datang kemudian.","130":"”Selamat sejahtera bagi Ilyas.”","131":i,"132":G,"133":"Dan sungguh, Lut benar-benar termasuk salah seorang rasul.","134":"(Ingatlah) ketika Kami telah menyelamatkan dia dan pengikutnya semua,","135":"kecuali seorang perempuan tua (istrinya) bersama-sama orang yang tinggal (di kota).","136":"Kemudian Kami binasakan orang-orang yang lain.","137":"Dan sesungguhnya kamu (penduduk Mekah) benar-benar akan melalui (bekas-bekas) mereka pada waktu pagi,","138":"dan pada waktu malam. Maka mengapa kamu tidak mengerti?","139":"Dan sungguh, Yunus benar-benar termasuk salah seorang rasul,","140":"(ingatlah) ketika dia lari, ke kapal yang penuh muatan,","141":"kemudian dia ikut diundi ternyata dia termasuk orang-orang yang kalah (dalam undian).","142":"Maka dia ditelan oleh ikan besar dalam keadaan tercela.","143":"Maka sekiranya dia tidak termasuk orang yang banyak berzikir (bertasbih) kepada Allah,","144":"niscaya dia akan tetap tinggal di perut (ikan itu) sampai hari kebangkitan.","145":"Kemudian Kami lemparkan dia ke daratan yang tandus, sedang dia dalam keadaan sakit.","146":"Kemudian untuk dia Kami tumbuhkan sebatang pohon dari jenis labu.","147":"Dan Kami utus dia kepada seratus ribu (orang) atau lebih,","148":"sehingga mereka beriman, karena itu Kami anugerahkan kenikmatan hidup kepada mereka hingga waktu tertentu.","149":"Maka tanyakanlah (Muhammad) kepada mereka (orang-orang kafir Mekah), “Apakah anak-anak perempuan itu untuk Tuhanmu sedangkan untuk mereka anak-anak laki-laki?”","150":"atau apakah Kami menciptakan malaikat-malaikat berupa perempuan sedangkan mereka menyaksikan(nya)?","151":"Ingatlah, sesungguhnya di antara kebohongannya mereka benar-benar mengatakan,","152":"”Allah mempunyai anak.” Dan sungguh, mereka benar-benar pendusta,","153":"apakah Dia (Allah) memilih anak-anak perempuan daripada anak-anak laki-laki?","154":"Mengapa kamu ini? Bagaimana (caranya) kamu menetapkan?","155":"Maka mengapa kamu tidak memikirkan?","156":"Ataukah kamu mempunyai bukti yang jelas?","157":"(Kalau begitu) maka bawalah kitabmu jika kamu orang yang benar.","158":"Dan mereka mengadakan (hubungan) nasab (keluarga) antara Dia (Allah) dan jin. 
Dan sungguh, jin telah mengetahui bahwa mereka pasti akan diseret (ke neraka),","159":"Mahasuci Allah dari apa yang mereka sifatkan,","160":F,"161":"Maka sesungguhnya kamu dan apa yang kamu sembah itu,","162":"tidak akan dapat menyesatkan (seseorang) terhadap Allah,","163":"kecuali orang-orang yang akan masuk ke neraka Jahim.","164":"Dan tidak satu pun di antara kami (malaikat) melainkan masing-masing mempunyai kedudukan tertentu,","165":"dan sesungguhnya kami selalu teratur dalam barisan (dalam melaksanakan perintah Allah).","166":"Dan sungguh, kami benar-benar terus bertasbih (kepada Allah).","167":"Dan sesungguhnya mereka (orang kafir Mekah) benar-benar pernah berkata,","168":"”Sekiranya di sisi kami ada sebuah kitab dari (kitab-kitab yang diturunkan) kepada orang-orang dahulu,","169":"tentu kami akan menjadi hamba Allah yang disucikan (dari dosa).”","170":"Tetapi ternyata mereka mengingkarinya (Al-Qur'an); maka kelak mereka akan mengetahui (akibat keingkarannya itu).","171":"Dan sungguh, janji Kami telah tetap bagi hamba-hamba Kami yang menjadi rasul,","172":"(yaitu) mereka itu pasti akan mendapat pertolongan.","173":"Dan sesungguhnya bala tentara Kami itulah yang pasti menang.","174":"Maka berpalinglah engkau (Muhammad) dari mereka sampai waktu tertentu,","175":"dan perlihatkanlah kepada mereka, maka kelak mereka akan melihat (azab itu).","176":"Maka apakah mereka meminta agar azab Kami disegerakan?","177":"Maka apabila (siksaan) itu turun di halaman mereka, maka sangat buruklah pagi hari bagi orang-orang yang diperingatkan itu.","178":"Dan berpalinglah engkau dari mereka sampai waktu tertentu. ","179":"Dan perlihatkanlah, maka kelak mereka akan melihat (azab itu).","180":"Mahasuci Tuhanmu, Tuhan Yang Mahaperkasa dari sifat yang mereka katakan.","181":"Dan selamat sejahtera bagi para rasul.","182":"Dan segala puji bagi Allah Tuhan seluruh alam."}}},tafsir:{id:{kemenag:{name:"Kemenag",source:"Aplikasi Quran Kementrian Agama Republik Indonesia",text:{"1":"Di dalam Al-Qur'an terdapat banyak kata-kata untuk bersumpah, yang maksudnya untuk menguatkan kesan yang diberikan dalam ayat-ayatnya. Kata-kata yang dipakai untuk bersumpah itu pastilah kata-kata yang mempunyai arti penting yang menunjukkan kebesaran dan kekuasaan-Nya, misalnya: \"demi matahari\", \"demi malam\", dan sebagainya.\n\nPada ayat ini, Allah berfirman, \"Demi (rombongan malaikat) yang berbaris bersaf-saf.\" Maksudnya ialah demi malaikat-malaikat yang berbaris dalam saf-saf yang lurus dan teratur, dalam melakukan ibadah dan tugas-tugas lain yang diperintahkan Allah. Hal ini mempunyai arti bahwa para malaikat selalu disiplin, teratur, dan rapi dalam melaksanakan tugas dari Allah. Rasulullah bersabda:\n\nRasulullah bersabda, \" Mengapa kamu tidak berbaris seperti malaikat berbaris di hadapan Allah?\" Kami bertanya, \"Bagaimana berbarisnya malaikat di hadapan Allah?\" Rasulullah menjawab, \"Malaikat menyempurnakan barisan depan kemudian merapatkan dan merapikannya.\" (Riwayat Abu Dawud., Ibnu Majah, dan A.hmad dari Jabir bin Samurah)","2":"Pada ayat ini, Allah bersumpah dengan menyebut para malaikat yang menghardik untuk melarang makhluk sedemikian rupa dari perbuatan-perbuatan maksiat. Malaikat adalah makhluk Allah yang sangat patuh dan taat kepada perintah dan larangan-Nya. Oleh sebab itu, mereka tidak senang melihat makhluk lain yang berbuat kemaksiatan, melanggar larangan Allah, dan tidak melaksanakan apa yang diperintahkan-Nya. 
Mereka menghardiknya seperti seorang gembala yang menghardik untuk menghalau ternaknya.","3":"Allah bersumpah dengan menyebutkan malaikat yang senantiasa membacakan zikir atau ayat-ayat-Nya. Pernyataan ini berarti bahwa Al-Qur'an diturunkan kepada Nabi Muhammad adalah dengan perantaraan malaikat. Demikian pula wahyu Allah yang diturunkan kepada para rasul sebelum Nabi Muhammad, juga disampaikan dengan perantaraan malaikat.","4":"Allah menegaskan pada ayat ini bahwa Dia benar-benar Maha Esa. Ia tidak berserikat dengan siapa pun dalam menciptakan, memelihara, dan menguasai segala makhluk-Nya. Tuhan yang pantas ditaati dan disembah memang hanya satu, yaitu Allah swt. Dalam Surah al-Ikhlash, jelas Allah menerangkan zat-Nya: huwa Allah ahad, Allah ash-shamad.","5":"Kata-kata sumpah yang terdapat pada ayat-ayat yang lalu diikuti dengan keterangan dan pembuktian tentang kekuasaan Allah. Maka pada ayat ini Allah menegaskan bahwa Dia adalah Tuhan yang menciptakan dan memelihara semua langit dan bumi, serta segala apa yang berada di antara keduanya. Dia pula yang menguasai seluruh penjuru alam ini, antara lain tempat-tempat terbitnya matahari setiap hari sepanjang tahun. Ini semuanya menunjukkan kekuasaan dan kebesaran-Nya, serta keindahan dari semua ciptaan-Nya yang tak dapat ditiru oleh siapa pun juga.","6":"Selanjutnya Allah menambahkan lagi bukti-bukti tentang kekuasaan-Nya, yaitu bahwa Dia telah menghias langit dengan planet-planet yang demikian indah. Barang siapa memandang langit di waktu malam yang cerah dan penuh bintang, serta bulan yang bersinar lemah, semestinya merasa sangat takjub dan dari mulutnya akan terucap kata-kata \"Allahu Akbar\", Allah Mahabesar.","7":"Di samping ciptaan-ciptaan-Nya yang demikian menakjubkan, Allah memelihara semua makhluk-Nya itu dari apa yang akan merusaknya. Ia memelihara manusia dari godaan setan yang senantiasa membujuk manusia untuk melakukan kemaksiatan, yang akan menjerumuskan kepada kebinasaan dan kemurkaan-Nya. Untuk itu, Allah telah memberikan petunjuk, berupa agama yang benar, yang akan menjaga manusia dari godaan setan. Hanya manusia yang ingkar yang dapat ditundukkan oleh rayuan setan yang mencelakakan itu.","8":"Pada ayat ini Allah menjelaskan bahwa setan tidak dapat mendengar pembicaraan malaikat. Setan-setan itu dilempari dari segala penjuru karena ulah mereka yang suka merusak tatanan alam dan menggoda manusia untuk berbuat maksiat kepada Allah.","9":"Lemparan itu untuk mengusir setan-setan tersebut karena mereka makhluk yang ingkar dan sesat, dan selalu berusaha menyesatkan manusia, dan membujuk manusia supaya ingkar kepada Tuhan. Untuk mereka telah disediakan azab yang akan berlangsung selama-lamanya di neraka.","10":"Akan tetapi, bila ada di antara setan-setan yang sengaja mendengar-dengarkan pembicaraan para malaikat, ia segera diburu dengan suluh api yang menyala-nyala. Ini menunjukkan betapa terkutuknya setan-setan itu, sehingga mereka merupakan makhluk yang paling dibenci dan diusir di mana-mana. Oleh sebab itu, manusia tidak patut takluk kepada rayuan dan godaan mereka.","11":H,"12":H,"13":j,"14":j,"15":j,"16":I,"17":I,"18":J,"19":J,"20":K,"21":K,"22":L,"23":L,"24":k,"25":k,"26":k,"27":M,"28":M,"29":N,"30":N,"31":O,"32":O,"33":P,"34":P,"35":Q,"36":Q,"37":b,"38":b,"39":b,"40":b,"41":R,"42":R,"43":S,"44":S,"45":l,"46":l,"47":l,"48":T,"49":T,"50":"Pada ayat ini, Allah menerangkan bahwa orang-orang mukmin dalam surga duduk saling berhadap-hadapan dan berbincang-bincang satu sama lain sambil menikmati minuman yang disuguhkan kepada mereka. 
Betapa nikmatnya mengenang masa lampau mereka sewaktu dalam kesenangan dan ketenteraman hidup dalam surga. Mereka berbincang-bincang tentang pelbagai keutamaan dan pengalaman di dunia.","51":m,"52":m,"53":m,"54":c,"55":c,"56":c,"57":c,"58":d,"59":d,"60":d,"61":d,"62":U,"63":U,"64":V,"65":V,"66":n,"67":n,"68":n,"69":W,"70":W,"71":"Pada ayat ini Allah menerangkan bahwa sebagian besar umat-umat zaman dahulu sebelum Nabi Muhammad saw telah sesat. Mereka menyembah berhala dan mempersekutukannya dengan Tuhan dan seringkali berbuat kerusakan di atas bumi dengan mengadakan peperangan. Hidup mereka didasarkan atas hawa nafsu dan angkara murka. Pemimpin-pemimpin mereka dan pembesar-pembesar negara berlaku aniaya dan menindas rakyat dengan kerja paksa membangun istana-istana dan kuil-kuil tempat penyembahan berhala dan makam-makam raja. Bahkan ada di antara mereka yang mengaku Tuhan dan rakyat dipaksa menyembah mereka. Demikianlah kisah-kisah umat-umat zaman dahulu seperti kaum 'Ad, Samud, raja Namrud, Fir'aun, dan lain-lainnya.","72":"Lalu Allah mengutus kepada umat-umat dahulu itu nabi-nabi dan rasul-rasul untuk menegakkan agama tauhid, menjalankan amar ma'ruf nahi munkar. Nabi-nabi itu merupakan pemberi peringatan yang berjuang untuk meluruskan jalan hidup manusia yang menyimpang dari fitrah kejadiannya. Mereka menunjukkan kepada kaumnya jalan yang hak dan yang batil, jalan yang baik dan yang buruk, serta mengingatkan kepada mereka azab yang akan menimpa bila mereka tidak mau meninggalkan kesesatan dan tidak mau tunduk kepada kebenaran yang dibawa rasul-rasul.\n\nTetapi nabi-nabi dan rasul-rasul itu ditentang, didustakan dan dimusuhi, bahkan ada di antara mereka yang dianiaya sampai dibunuh. Kehadiran para rasul di tengah-tengah mereka itu dipandang sebagai gangguan bagi kemantapan kehidupan mereka, karena itu mereka tetap dalam kesesatan dan kegelapan. Kesudahannya datanglah azab Tuhan menimpa mereka sebagaimana diterangkan Allah dalam firman-Nya:\n\nMaka adapun kaum Samud, mereka telah dibinasakan dengan suara yang sangat keras, sedangkan kaum 'Ad, mereka telah dibinasakan dengan angin topan yang sangat dingin. (al-haqqah\u002F69: 5-6)","73":"(73-74) Pada ayat ini Allah menyerukan kepada Rasulullah saw dan umatnya untuk memperhatikan nasib kaum-kaum yang mendustakan rasul-rasul itu. Bekas-bekas kehancuran mereka itu masih dapat disaksikan berupa peninggalan purbakala. Dengan memperhatikan sejarah umat dahulu, mereka akan memperoleh pelajaran untuk merenungkan peringatan-peringatan yang disampaikan oleh Nabi Muhammad saw.\n\nTidaklah semua orang yang berada dalam kaum itu mengingkari utusan Tuhan yang datang kepada mereka dan mengalami siksaan sebagai balasan terhadap keingkaran kaum itu. Tetapi di antara mereka terdapat hamba-hamba Allah yang beriman kepada-Nya dengan setulus hati beramal saleh, menaati segala perintah dan larangan-Nya. Mereka diselamatkan dari siksaan dan dianugerahi kebahagiaan dunia dan akhirat.","74":"Pada ayat ini Allah menyerukan kepada Rasulullah saw dan umatnya untuk memperhatikan nasib kaum-kaum yang mendustakan rasul-rasul itu. Bekas-bekas kehancuran mereka itu masih dapat disaksikan berupa peninggalan purbakala. Dengan memperhatikan sejarah umat dahulu, mereka akan memperoleh pelajaran untuk merenungkan peringatan-peringatan yang disampaikan oleh Nabi Muhammad saw.\n\nTidaklah semua orang yang berada dalam kaum itu mengingkari utusan Tuhan yang datang kepada mereka dan mengalami siksaan sebagai balasan terhadap keingkaran kaum itu. 
Tetapi di antara mereka terdapat hamba-hamba Allah yang beriman kepada-Nya dengan setulus hati beramal saleh, menaati segala perintah dan larangan-Nya. Mereka diselamatkan dari siksaan dan dianugerahi kebahagiaan dunia dan akhirat.","75":"Ayat ini menerangkan bahwa Nabi Nuh berdoa kepada Tuhan supaya memberikan pertolongan kepadanya terhadap ancaman penganiayaan dari kaumnya. Bahkan mereka sudah bermaksud membunuhnya sewaktu dia menyeru mereka kepada agama tauhid.\n\nMeskipun cukup lama Nabi Nuh menyeru kaumnya siang dan malam, secara sembunyi dan terang-terangan, namun hanya sedikit di antara mereka yang beriman. Setiap kali diberi peringatan dan pengajaran, mereka bertambah jauh dari agama dan tambah sengit permusuhannya kepada Nabi Nuh. Hal itu menyebabkan Nabi Nuh sangat kecewa lalu dia berdoa kepada Tuhan agar orang-orang kafir itu segera dibinasakan. Firman Allah:\n\nDan Nuh berkata, \"Ya Tuhanku, janganlah Engkau biarkan seorang pun di antara orang-orang kafir itu tinggal di atas bumi. Sesungguhnya jika Engkau biarkan mereka tinggal, niscaya mereka akan menyesatkan hamba-hamba-Mu, dan mereka hanya akan melahirkan anak-anak yang jahat dan tidak tahu bersyukur. (Nuh\u002F71: 26-27)\n\nAllah mengabulkan doa Nabi Nuh itu. Allah menyebutkan dirinya sebagai Zat yang paling baik dalam mengabulkan doa. Pengabulan itu sangat diharapkan oleh Nabi Nuh pada saat itu karena kaumnya mendustakan dan menentangnya.","76":o,"77":o,"78":o,"79":"Kemudian disebutkan salam kesejahteraan bagi Nuh \"Salamun 'ala Nuhin\" sebagai pengajaran bagi para malaikat, jin, dan manusia supaya mereka juga mengucapkan salam sejahtera kepada Nuh sampai hari Kiamat. Allah berfirman:\n\nDifirmankan, \"Wahai Nuh! Turunlah dengan selamat sejahtera dan penuh keberkahan dari Kami, bagimu dan bagi semua umat (mukmin) yang bersamamu. (Hud\u002F11: 48)\n\nDengan ucapan salam sejahtera untuk Nuh oleh umat manusia dari masa ke masa maka nama Nabi Nuh akan tetap harum dan diingat sepanjang masa.","80":p,"81":p,"82":p,"83":"Ayat ini menerangkan bahwa Nabi Ibrahim termasuk keturunan dan penerus risalah Nabi Nuh. Beliau mengikuti jejak Nabi Nuh dalam memegang ajaran tauhid, meyakini akan adanya hari Kiamat, memperjuangkan penyebaran agama tauhid dan kepercayaan akan hari Kiamat, melaksanakan amar ma'ruf nahi munkar serta tabah dan sabar dalam menghadapi permusuhan kaum kafir.","84":"Ayat ini mempertegas lagi kemurnian jiwa Nabi Ibrahim. Dia menghadapkan jiwanya kepada Tuhan Yang Maha Esa dengan penuh keikhlasan, bersih dari kemusyrikan, terlepas dari kepentingan kehidupan duniawi, dan jauh dari perasaan buruk lainnya yang dapat mengganggu jiwanya.","85":q,"86":q,"87":q,"88":r,"89":r,"90":r,"91":e,"92":e,"93":e,"94":e,"95":a,"96":a,"97":a,"98":a,"99":a,"100":X,"101":X,"102":"Kemudian ayat ini menerangkan ujian yang berat bagi Ibrahim. Allah memerintahkan kepadanya agar menyembelih anak satu-satunya sebagai korban di sisi Allah. Ketika itu, Ismail mendekati masa balig atau remaja, suatu tingkatan umur sewaktu anak dapat membantu pekerjaan orang tuanya. Menurut al-Farra', usia Ismail pada saat itu 13 tahun. Ibrahim dengan hati yang sedih memberitahukan kepada Ismail tentang perintah Tuhan yang disampaikan kepadanya melalui mimpi. Dia meminta pendapat anaknya mengenai perintah itu. Perintah Tuhan itu berkenaan dengan penyembelihan diri anaknya sendiri, yang merupakan cobaan yang besar bagi orang tua dan anak. 
After hearing of God's command, Ismail with all humility told his father to carry out whatever had been commanded. He would obey, accept, and sincerely submit to God's decree, honoring His command and surrendering to Him. Ismail, still so young, told his father that he would not flinch before the trial and would not hesitate to accept God's qada and qadar. Steadfast and patient, he would bear the pain of the slaughter. Ismail's attitude is highly praised by Allah in His word:

And relate (O Muhammad) the story of Ismail in the Book (the Qur'an). He was indeed true to his promise, a messenger and a prophet. (Maryam/19: 54)

Verse 112: This verse explains that Allah delivered to Ibrahim the good news of the birth of a son by his first wife, Sarah. The news was brought by angels, disguised as men, when they visited his home, even though Sarah was by then old. Allah says:

Then he (Ibrahim) felt afraid of them. They said, "Do not fear," and they gave him good news of (the birth of) a knowledgeable son (Ishak). Then his wife came forward crying out (in astonishment) and struck her own face, saying, "(I am) a barren old woman." They said, "Thus your Lord has decreed. Truly, He is the Most Wise, the All-Knowing." (adz-Dzariyat/51: 28-30)

The angels also announced that Ishak would be a prophet, and that from him would descend Yakub, likewise a prophet. Both are among the righteous servants of Allah, who loved to do good and brought benefit to their people.

The news of Ishak's birth is also given by Allah in other surahs, such as Surah Hud/11: 69-73, Surah Maryam/19: 49, and Surah al-Anbiya'/21: 72.

Among the commentators there is an opinion that it was Ishak, not his elder brother Ismail, whom Ibrahim was to sacrifice in obedience to God's command. Ibnu Katsir in his tafsir, citing al-Bagawi, reports that Umar, Ali, Ibnu Mas'ud, and Ibnu 'Abbas held that Ishak was the intended sacrifice. The source of this opinion was Jews who had entered Islam. According to Ibnu Katsir, every report naming Ishak as the sacrifice goes back to Ka'bul-Akhbar, a Jew who embraced Islam in the time of the Caliph Umar and who read the contents of the Torah to him.

On this disagreement about the sacrifice, Ibnu al-Qayyim in his book Zadul Ma'ad says that the correct view, held by the scholars among the Companions, the Successors, and later scholars, is that Ismail was Ibrahim's sacrifice. The opinion naming Ishak is gravely mistaken from several angles. Ibnu Taimiyah, as quoted by Ibnu al-Qayyim, said, "That opinion was put about by the People of the Book, even though it contradicts their own scripture."

The Torah says that Allah commanded Ibrahim to sacrifice his firstborn son. Muslims and the People of the Book alike agree that the son born first was Ismail. Later, however, they distorted the Torah by inserting the words: Sacrifice your son Ishak.
According to Ibnu Taimiyah, "That is an addition fabricated by the Jews, for it contradicts the words 'your firstborn, your only son'; their envy of the descendants of Ismail, who received such honor, drove them to falsify this part of the scripture."

The second argument Ibnu Taimiyah advances rests on the Qur'an:

Then We gave him good news of (the birth of) Ishak, and after Ishak (would be born) Yakub. (Hud/11: 71)

Allah announced to Sarah the birth of Ishak, from whom a son named Yakub would descend. It is impossible that Allah would announce Ishak's birth and then command that he be slaughtered when it had already been declared that Yakub would descend from him. How could Yakub come into the world if his father were made the sacrifice, when it was promised he would be born of Ishak's line? Hence the sacrifice was not Ishak but Ismail.

For his third argument, Ibnu Taimiyah points to the account of Ibrahim and his son in this Surah ash-Shaffat. Verses 103-111 relate that when Ibrahim was about to slaughter his son in obedience to Allah's command, a voice called to him from behind, declaring that by this act Ibrahim was deemed to have fulfilled Allah's command. For his sincere obedience, Ibrahim received reward and praise from Allah.

Only after that event did Allah give Ibrahim the news of Ishak's birth, as recompense for his patience and obedience. It therefore cannot have been Ishak who was to be sacrificed, for he had not yet been born.

The fourth argument: the event of Ibrahim's sacrifice of his son took place near Mecca; no one doubts this. That is why the rite of qurban is held on the day of the hajj festival. Likewise the sa'i between Safa and Marwah and the stoning of the jumrah during the hajj commemorate what befell Ismail and his mother, who, as is known, lived in Mecca. The time and place of the qurban are always bound to the Baitulharam. Had Ishak been the intended sacrifice, the rite of qurban would surely have been held where Ishak lived (Syam), not in Mecca.

These are some of the arguments Ibnu Taimiyah put forward to refute the opinion that Ishak was the sacrifice. (See also the notes in the glossary entries on Ibrahim and Ismail.)

Verse 113: This verse explains that Allah bestowed blessing and prosperity in this world and the hereafter upon Ibrahim and Ishak. From the two of them came descendants spread far and wide, and from among those descendants arose many prophets and messengers. Muslims are instructed to ask God in every prayer that Ibrahim and his family be granted blessing and happiness.

Among their offspring, scattered across the earth, some did good and others wronged their own souls. Those who did good are those who believed in Allah, honored His commands, and avoided His prohibitions according to the guidance of His messengers. Those who wronged themselves are those who denied the religion the messengers brought and committed transgression and disobedience.

This verse reminds mankind that from a noble and honored family may come good offspring or bad. Descent or race guarantees neither honor nor disgrace for one's children, for that still depends on the effort put into their education and upbringing.
Ibrahim, Ishak, and Yakub are declared by Allah to have attained the rank of nobility. Allah says:

And remember Our servants Ibrahim, Ishak, and Yakub, men of great strength and (lofty) knowledge. (Shad/38: 45)

Yet the descendants of Yakub, known as the Bani Israil, have, in ancient as in modern history, suffered a great deal of hardship and humiliation. The cause is that they wronged their own souls, rebelled against their forefathers, and abandoned the guidance of Allah and the prophets.

Verse 114: Allah explains that He bestowed on Musa and Harun a great favor, namely prophethood and messengership. They were also entrusted with a noble task: to lead the Bani Israil, free them from enslavement by Fir'aun, and bring them back to their homeland. The task was extremely heavy; had it not been for Allah's help, they would surely have perished.

The story of Musa is told more often than any other in the Qur'an. As a messenger, he shares many similarities with the Prophet Muhammad, as Allah explains in Surah al-Muzzammil/73, verse 15.

Verse 123: In this verse Allah affirms that Ilyas was a messenger sent by Him. According to ath-Thabari, Ilyas was the son of Yasin bin Finhas bin 'Iyzar bin the Prophet Harun, the brother of the Prophet Musa. His prophethood came after that of the Prophet Sulaiman. He was sent by Allah to the Bani Israil at a time when that people no longer worshiped Allah but worshiped idols. Their kings, too, supported the idolatrous religion, even building special places for slaughtering animals to be offered to the idols.

Verse 124: The Prophet Ilyas warned his people to be mindful of Allah, that is, to carry out all His commands and avoid all His prohibitions. Such piety (takwa) is the core of the teaching of all the prophets, down to the Prophet Muhammad. If they were God-fearing they would be happy in this world and the next, but if they persisted in disbelief they would be struck by a terrible punishment from Allah.

Verse 125: The Prophet Ilyas asked them to abandon the worship of the idol they called Ba'l. According to some scholars, Ba'l was the name of an idol of the Phoenicians in the era before Christ. Others say Ba'l was the name of an idol worshiped by the people of the city of Ba'labak, west of Damascus. Ilyas rebuked them for worshiping that idol, for it creates nothing and indeed can do nothing at all. What deserves to be taken as God and worshiped is the Creator, not the idol Ba'l, which is powerless.

Verse 126: The Prophet Ilyas affirmed that the Creator is Allah. It is Allah who created them and their forefathers. Therefore Allah is their true Lord, and the Lord of their forefathers, the Prophets Ibrahim, Ismail, Ishak, and Yakub.
Before he died, the Prophet Yakub received a pledge from his sons that they would take none but Allah as their God, as related in the Qur'an:

Were you witnesses when death approached Yakub, when he said to his sons, "What will you worship after me?" They answered, "We will worship your God and the God of your fathers Ibrahim, Ismail, and Ishak, the One God, and to Him we (alone) submit." (al-Baqarah/2: 133)

By worshiping the idol Ba'l, then, they had violated the pledge of their forefathers.

Verse 129: This verse explains the honor Allah granted the Prophet Ilyas for his tireless struggle in delivering the call to mankind. It is the same honor granted to the Prophets Nuh, Ibrahim, Musa, and Harun: their names are remembered through the ages by the people of faith, including the praise of their names in the Qur'an, which endures to the end of time.

Verse 130: Allah pronounces peace upon "Ilyasin" (a plural form of Ilyas), that is, upon the Prophet Ilyas and those who accepted and supported the teaching he conveyed. A greeting of peace from Allah is an assurance of everlasting safety and wellbeing from Him for the Prophet Ilyas and his followers, in this world and the hereafter.

Imam Nafi' reads ال ياسين as "ali Yasin," on the pattern of "ali Muhammad," while Imam Hafsh reads it "Ilyasin." The commentators then differ over whether ال ياسين means Ilyas himself or the family of Yasin. Most scholars, however, hold that it means his family or his followers.

Verse 131: In this verse Allah affirms that the honor granted to the Prophet Ilyas was on account of the good he had done. He had struggled to uphold the religion of tauhid and to set right the way of life of his people, the Bani Israil. He had preached with great sacrifice and pure sincerity. His concern was not for himself but for how his people might believe and do good in their lives.

Verse 132: Allah praises the Prophet Ilyas as one of His believing servants. He was a man of true faith who devoted himself to his Lord. Because of his faith, Ilyas could make great sacrifices for the good of his people. Faith must indeed be proven by good deeds, and Ilyas proved his.

Verse 133: This verse explains that the Prophet Lut was a messenger of Allah. He lived in the time of the Prophet Ibrahim and was sent by Allah to a land called Sodom, in the region of Palestine. The people of this land were notorious for homosexual conduct. Lut strove to bring them to their senses, declaring that their behavior was deviant and cursed by Allah. Allah says:

And (remember) when Lut said to his people, "You commit a truly abominable deed (homosexuality) that no one in any community has ever done before you.
Do you really approach men, commit robbery, and do evil in your gatherings?" But his people's only answer was to say, "Bring upon us the punishment of Allah, if you are one of the truthful." (al-'Ankabut/29: 28-29)

But they paid no heed to Lut's warnings and counsel; indeed, they challenged him to ask Allah at once to send down His punishment upon them.

Verse 139: In this verse Allah affirms that the Prophet Yunus was a messenger of Allah. He was sent to the land of Niniveh (Nainawa), one of the cities of the Assyrian kingdom on the banks of the Tigris (the region of Mosul in present-day Iraq). He strove to awaken his people from deifying idols and called them to believe in and worship the One God, Allah, but they opposed him.

Verse 149: Allah asks the Prophet Muhammad to question the Meccan disbelievers about their belief that Allah has children, and that those children are daughters, even though in their own eyes a daughter is something lowly, as Allah says:

Yet when one of them is given news of (the birth of) a daughter, his face darkens and he is filled with rage. (an-Nahl/16: 58)

What was honorable in their eyes was a son, for a son could fight, defend them, and bring renown to the family. So they claimed sons for themselves while attributing daughters to Allah. Their position thus rested on a view both mistaken and self-serving, and the division their belief implies is unjust, as the following verses declare:

Is it (fitting) that for you there are males and for Him females? That, then, is an unfair division. (an-Najm/53: 21-22)

By assigning daughters, whom they despised, to Allah while keeping sons for themselves, they were demeaning Allah. The question Allah commands the Prophet Muhammad to put to the Meccan disbelievers thus also implies that their view is wrong. In Allah's sight there is no difference between male and female; what distinguishes people is only their piety.

Verse 150: The daughters they attribute to Allah are the angels. Allah then sharpens His rebuttal by asking further whether they were present when Allah created, or gave birth to, the angels as His daughters. They have no evidence whatever for the claim, nor the other kind of proof, namely revelation. Their view is therefore false, an utterance that cannot be answered for, and its sin is immense, as the following verse declares:

And they made the angels, who are servants of the Most Merciful, into females. Did they witness their creation? Their testimony will be recorded, and they will be questioned. (az-Zukhruf/43: 19)

Verses 151-153: Allah then condemns still more severely their claim that He has a child. Allah declares that their view is nothing but a great, fabricated lie, and for fabricating it He brands them great liars.
To press home the condemnation of their lie, Allah asks, "Did He choose daughters over sons?" That is: daughters are lowly in their eyes and sons noble, so would Allah choose daughters for Himself and leave sons to them? Were that so, Allah would be foolish and they clever. It is this view that Allah condemns, for Allah cannot beget and has no need of a child, and He may not be insulted with the notion that daughters suffice for Allah while sons are for them. They must answer for the great sin of this false belief, and for the sin of those who followed it. Allah says:

"Has your Lord then chosen sons for you and taken daughters for Himself from among the angels? Truly, you utter a word of great (sin)." (al-Isra'/17: 40)

Verse 158: Besides regarding the angels as Allah's children, the Meccan disbelievers also held that Allah has ties of kinship (nasab) with the jinn: that Allah took a number of female jinn as wives, and that from that union the angels were born, and born female. The view is utterly false, for it would place the jinn on a level with Allah, when they themselves admit that the jinn too will one day be brought before Him, held to account for their deeds, and punished if guilty. That accountability means they are not Allah's equals and not His family, but His servants, to be rewarded for good and punished for evil, as He says:

And they say, "The Most Merciful has taken (an angel as) a child." Glory be to Him! Rather, they (the angels) are honored servants. (al-Anbiya'/21: 26)

Verse 164: This verse records the angels' declaration concerning themselves: that they bear particular functions and duties, and that they carry them out without subtracting from or adding to anything Allah has commanded, as He says:

...who do not disobey Allah in what He commands them and always do what they are commanded. (at-Tahrim/66: 6)

Verse 165: The angels explain further that they perform their duties in ranks: always ready to carry out their tasks, working together in strong, disciplined units. Arrayed in ranks this way, they carry out their tasks with full vigor and to perfection, so that the work succeeds completely, with nothing lacking and nothing in excess. Such earnestness in duty shows how deeply obedient they are to Allah and His commands.

This obedience and earnestness of the angels deserves to be imitated by the Muslims. In a sahih hadith transmitted by Muslim on the authority of Jabir bin Samurah, he relates:

From Jabir bin Samurah: the Messenger of Allah once came out to us while we were in the mosque and said, "Why do you not form rows as the angels form rows before their Lord?" We asked, "O Messenger of Allah, how do the angels form rows before their Lord?"
The Messenger of Allah said, "They complete the first row and close it up tightly." (Riwayat Muslim)

Inspired by this verse, the Caliph Umar bin Khaththab would straighten the rows before leading the prayer. Abu Nadhrah reports:

When the iqamah was called, Umar r.a. would face the congregation and say, "Order your rows; straighten your lines! Allah the Exalted wishes you to follow the conduct of the angels." Then he would recite the verse "wa inna lanahnu ash-shaffun" and say, "So-and-so, step back; so-and-so, step forward!" Only then would he go forward and pronounce the takbir (to lead the prayer). (Riwayat Ibnu Abi Hatim and Ibnu Jarir)

Verse 166: Allah then describes the conduct of the angels: they glorify Him constantly. To glorify Allah (bertasbih) is to declare Him free of every attribute unworthy of Him, whether attributes of deficiency, such as weakness, drowsiness, or need of a helper or child, or blameworthy attributes such as wrathfulness and injustice. Tasbih is not fulfilled by words alone, by saying subhanallah; it must be accompanied by deeds. The perfect example of tasbih is what the angels do: they not only praise Allah without ceasing but also carry out His commands in full.

Verse 170: Allah explains that the messenger they had been waiting for had in fact come, namely the Prophet Muhammad, and the guidance they longed for already existed, namely the Qur'an. Yet they denied both the prophet and the scripture. Their conduct is described in another verse:

And they swore by Allah with their most solemn oaths that if a warner came to them, they would surely be better guided than any other community. Yet when a warner did come to them, it increased them in nothing but aversion (to the truth). (Fathir/35: 42)

At the close of the verse, Allah affirms that the disbelievers will come to know the consequence of their disbelief: misery in this world through defeat, and in the hereafter everlasting torment in hell. That threat ought to make them afraid, and so believe.

After seeing his people standing stunned with bowed heads, the Prophet Ibrahim spoke to them again: it was not fitting for them to worship statues they had carved with their own hands. They ought rather to be grateful that from among their own number had arisen a man of reason who forbade the worship of those statues. Ibrahim declared once more that the only one worthy of worship is Allah, who created both them and the statues they worshiped.
The Creator is more deserving of worship than His creatures. Allah says:

He (Ibrahim) said, "Why do you worship, besides Allah, things that can bring you no benefit at all, nor do you any harm? Shame on you and on what you worship besides Allah! Do you not understand?" (al-Anbiya'/21: 66-67)

They could not counter Ibrahim's argument with any argument of their own, so they resorted to violence against him. They plotted to kill him, and a structure was built to serve as the place where Ibrahim would be burned. When the structure was finished and its fire lit, Ibrahim was thrown into it. Allah says:

They said, "Burn him and avenge your gods, if you are going to act." (al-Anbiya'/21: 68)

Ibrahim's people truly wanted him destroyed and burned to ashes in that bonfire. But Allah willed to save him from destruction, commanding the fire not to burn him, as He says:

We (Allah) said, "O fire! Be cool and a safety for Ibrahim!" (al-Anbiya'/21: 69)

Thus Ibrahim came out of the bonfire unharmed and was given victory over the disbelievers.

When he no longer saw any sign that his people were willing to believe, he resolved to leave them and emigrate from his homeland. Perhaps in a new place he could worship his Lord without harassment from a defiant people, and could spread the religion with Allah's grace and guidance. The land he set out for was Baitulmakdis.

In this verse Allah refutes the accusation of the Meccan disbelievers. The Prophet Muhammad never uttered flights of fancy like a poet; rather, he is the bearer and upholder of the truth. The teaching of tauhid he spread needs no further doubt, for the oneness of God is confirmed by sound reason and can be demonstrated with clear proofs. It is not right to call the Messenger a poet when he brings the true teaching, the same teaching that had been brought before him by the earlier prophets.

The tauhid he taught continues the tauhid taught by the prophets of old; it is in no way Muhammad's invention. The charge that the Messenger was a poet or a madman sprang from nothing but hatred and denial. Allah will surely bring a painful punishment and heavy retribution upon the disbelievers who hurl such vile accusations at the Messenger. The punishment of those who reject the messengers' teaching may well be felt in this world, before it is felt in the hereafter, like the punishment suffered by the Samud, Fir'aun, and others. Yet God sends down punishment on men only as a requital, the consequence of their own deeds. Allah says:

Whoever does good, it is for his own soul, and whoever does evil, it is against it. And your Lord is never unjust to His servants. (Fushshilat/41: 46)

The dwellers of paradise say to their companions that they should look upon the state of the people of the Fire.
Through that survey they would surely grow in gratitude to Allah, who had granted them the grace to follow the guidance of the prophets and so escape the torment of hellfire.

So the people of paradise looked upon the state of the inhabitants of hell, and they were shown their disbelieving companions in the midst of the blazing fire. At that moment one of the dwellers of paradise pointed out a companion of his now in hell, for while in the world that companion had very nearly dragged him into disbelief. But by the grace and guidance Allah had bestowed on him, he escaped the influence of his disbelieving companion's ideas and was saved from the punishment of hell.

The conversation between the people of paradise and of hell is also described by Allah in His word:

And the people of paradise call out to the people of the Fire, "We have truly found what our Lord promised us. Have you too truly found what your Lord promised you?" They answer, "Yes." Then a crier (an angel) proclaims among them, "The curse of Allah is upon the wrongdoers." (al-A'raf/7: 44)

Allah also says:

The people of the Fire call out to the people of paradise, "Pour upon us (a little) water, or some of the provision Allah has granted you." They answer, "Truly, Allah has forbidden both to the disbelievers." (al-A'raf/7: 50)

In this passage Allah conveys the declaration of the people of paradise that they are fully content with the bliss and happiness of paradise. They live there knowing they will never die again and never suffer punishment. The only death they ever taste is the death that took them out of the life of the world. How different from the disbelievers in hell: though they have already died once in the world, they still long for a second death to end their suffering in the hell of Jahanam.

The people of paradise never doubt the eternity of life there, for doubt breeds anxiety, and anxiety is suffering. They declare again, with full earnestness, that all the delights they enjoy, the pleasures of food and drink and every spiritual contentment of paradise, are the great victory. To attain that great victory, they say, requires earnest striving, full of sincerity and devotion to Allah while in the world.

After his people had gone, Ibrahim quietly went to the place of the statues and asked, mockingly, why the statues did not eat the food set out before them. Those offerings were laid out by their worshipers on appointed days in hope of blessing.

Naturally the statues said nothing. Ibrahim asked again, "Why do these statues not answer my question?" Then he struck them hard until they were shattered, all but the largest statue. The incident enraged his people. They searched for the culprit and learned that it was Ibrahim who had smashed the statues. They hurried to him and asked whether it was true that he had done it. Ibrahim deflected the question, saying that it was the largest statue that had smashed them. On hearing Ibrahim's answer, his people bowed their heads and looked into themselves.
There was nothing they could do about that great statue, which all this while they had worshiped.

These verses explain that people of the various (revealed) religions and communities have loved the Prophet Ibrahim throughout the ages. The adherents of Judaism, Christianity, and Islam honor him and praise his name; even the Arab polytheists claimed that their religion followed the religion of Ibrahim.

Thus Allah answered Ibrahim's prayer when he said:

And grant me a good name among later generations, and make me one of the inheritors of the garden of delight. (asy-Syu'ara'/26: 84-85)

Then Allah honored Ibrahim by pronouncing upon him a greeting of peace. This greeting of peace for Ibrahim lives on among mankind and even among the angels. Thus three rewards were bestowed on him by Allah: a great ram brought to him as ransom for his son, the immortalizing of his good name through all ages, and the greeting of peace from his Lord and from mankind. Such is how Allah rewards His servants who do good; all of those rewards were recompense for his obedience in carrying out Allah's command.

Ibrahim reached that lofty rank through the force of his strong faith and the sincerity of his worship of Allah, so that he is counted among His believing servants.

اِنَّا كَذٰلِكَ نَجْزِى الْمُحْسِنِيْنَ

اِنَّهٗ مِنْ عِبَادِنَا الْمُؤْمِنِيْنَ

Thus do We reward those who do good.

Allah affirms that, because of the hardness of heart of those deniers, no counsel will avail them, for their souls have been soiled by their own ways and deeds.

Whenever proofs and miracles attesting the Prophet's truth were shown to them, they laughed and mocked, accusing the Prophet of being a sorcerer who had bewitched their minds and meant to turn them from the gods of their ancestors. They said that every proof of prophethood he presented was mere conjuring, that all the evidences of truth the Prophet brought meant nothing at all. So they shunned the Prophet's call and clung to the ancestral religion they had followed for centuries.

(16-17) Allah exposes the polytheists' denial of the events of the Day of Judgment. Those events confound their minds. They cannot comprehend at all what the Prophet Muhammad said, that scattered bones already turned to dust can be brought back to life. Still more astonishing to them is the resurrection of their ancestors, long buried in the earth with no trace remaining, which convinces them their ancestors cannot live again. All of this they put as questions to the Prophet.

(18-19) Allah commands the Prophet Muhammad to answer their question plainly: yes, they and their ancestors will be raised again after turning to dust. Those deniers will stand despised before Allah the Most High, as He says:

Truly, those too proud to worship Me will enter the hell of Jahanam in utter disgrace.
(al-Mu'min/40: 60)

In another verse Allah says:

... And all will come to Him in humble submission. (an-Naml/27: 87)

The coming of the Day of Judgment is supremely easy for Allah. With a single blast sounded from the trumpet, mankind will rise from the graves and live again. At that moment they will witness the fulfillment of Allah's threat.

The angels are commanded to detain them at the halting place and question them about what they earned, and about the sins and disobedience they committed. At that time they will also be asked about the false creeds, taught by the devils, that led their lives astray. The matter is explained in a hadith of the Prophet:

Abu Hurairah relates that the Messenger of Allah said, "The feet of a servant will not move on the Day of Judgment until he is asked about four things: his life, on what he spent it; his youth, on what he used it; his wealth, whence he got it and on what he spent it; and his knowledge, how far he acted on it." (Riwayat at-Tirmidzi)

On that day the disbelievers would help one another, as they imagined back in the world they could. But in the event no such thing can be done, and they are struck by a punishment that fits their deeds. Allah says:

(That is) the day when no friend can avail a friend in anything, and they will not be helped. (ad-Dukhan/44: 41)

Having described their food and their dwellings, Allah next describes the drink of the people of paradise. Waited upon by graceful youths, they enjoy delicious drink: a cup of wine of perfect clarity, like pure white water, delightful to the taste; and some of their drink is mixed with zanjabil (ginger), drawn from a spring of paradise named salsabil, as Allah says:

And there they are given a cup of drink mixed with ginger, (drawn from) a spring (in paradise) named Salsabil. And they are surrounded by youths who remain forever young; when you see them, you would think them scattered pearls. (al-Insan/76: 17-19)

The delight of the drink Allah provides in paradise completes the bliss of its people. They are served wine of every kind in abundance, as though drawn from a clear spring that flows without ceasing; whenever they ask, they receive. Allah also explains that the wine of paradise is altogether unlike the wine found in the world, in clarity, color, scent, and taste.

So too its effect on body and soul differs from the wine of the world: the wine of paradise neither harms nor intoxicates.

This passage relates a conversation among the people of paradise. One of them tells his companions that while alive in the world he had a friend who asked him, in a mocking tone, about his belief in the resurrection and the Day of Judgment. That friend utterly denied that the dead would be raised from their graves. In astonishment and denial, the friend said it was impossible, utterly beyond reason, that human beings already turned to dust and bones could be brought back to life from the grave.
And that afterward their deeds during their life in the world would be brought to account.

In the conviction of that disbeliever, there is no further reckoning between evil and good, between disbelief and faith; all human deeds are settled and accounted for in this world. Yet Allah affirms that there is a final reckoning, saying:

And the blind is not equal to the seeing, nor are those who believe and do good equal to the doers of evil. Little do you take heed. Truly, the Hour is surely coming; there is no doubt of it, yet most people do not believe. (al-Mu'min/40: 58-59)

Allah then explains that the food of the people of hell is the fruit of the zaqqum tree. Though they know its foul smell and bitter taste, their extreme hunger and the absence of any other food force them to eat it until their bellies are full.

Allah says:

They have no food but a thorny plant, which neither nourishes nor relieves hunger. (al-Gasyiyah/88: 6-7)

Having eaten the fruit of the zaqqum, they naturally need drink. They are then given a drink mixed with scalding water that scorches their faces, as Allah depicts:

Truly, We have prepared for the wrongdoers a fire whose flames surround them. If they cry for help (for a drink), they are given water like molten metal that scorches the face. (al-Kahf/18: 29)

And when they have eaten and drunk, they are returned to the hell of Jahim, their place of origin.

Next, the kinds of prayer of the Prophet Nuh that were answered are set out. First, Allah saved Nuh and the believers, including several of his sons, from the great catastrophe: a violent storm joined to a mighty flood. One of his sons drowned. Those who survived the great flood were those aboard the ship. Allah says:

Then We saved him (Nuh) and those with him in the heavily laden ship, and afterward We drowned those who remained. (asy-Syu'ara'/26: 119-120)

Second, Allah made Nuh's descendants the ones to carry on his line, while those who defied and opposed his call were destroyed, as Nuh had asked in his prayer.

Third, Allah immortalized praise and a good name for Nuh among the prophets who came after and among mankind until the end of time. He is renowned among the Muslims as one of the five messengers called ulul 'azmi, "those of firm resolve." The other four are Ibrahim, Musa, Isa, and Muhammad.

The immortalizing of Nuh's name with a greeting of peace is an honor to him, a recompense for the good he did and for his unceasing struggle, by day and by night, openly and in secret, over hundreds of years, to uphold the word of tauhid. It is likewise a reward for his patience in bearing suffering of body and spirit while delivering the message among his people.

What drove Nuh to labor so hard in guiding his people was the purity and sincerity of his devotion to Allah, joined to the firmness of faith in his soul.
For that reason Allah declares that he was truly His servant, full of faith. Faith is singled out in the person of Nuh, a messenger who received praise, to show its immense significance, for faith is the capital of every good deed.

As for the rest of Nuh's people, who would not believe in the religion of tauhid delivered to them, they were destroyed by the storm and the great flood until not one of them remained, nor any trace of them to be remembered. They vanished from the record of human history.

Allah then reminds us of the story of the Prophet Ibrahim, when with a pure and sincere soul he asked his father and his people why they worshiped statues. It ought never to have happened, had they only reflected on statues of worship that bring them neither benefit nor harm.

Allah says:

(Remember) when he (Ibrahim) said to his father, "O my father! Why do you worship what cannot hear, cannot see, and cannot help you in the least? O my father! Truly, knowledge has come to me that has not come to you, so follow me, and I will show you a straight path." (Maryam/19: 42-43)

Ibrahim told them plainly that their insistence on worshiping something other than Allah, on groundless pretexts, was wrong. To worship the unseen God, guidance is required; otherwise worship rests on fantasies and on each person's private taste. That breeds countless forms of worship of God, each according to each person's own conception of Him.

In the Age of Ignorance, every Arab tribe had idols and statues of its own, each after its own notions. So too in Ibrahim's day there were many statues of worship, products of his people's imagination at the time. Ibrahim, granted by Allah knowledge not granted to his people, naturally strove to change that state of affairs. He put question after question to his people until they were forced to examine themselves: on what basis did they refuse to worship the Creator and Ruler of the universe, and instead associate Him with statues and idols? In truth they could produce no argument against worshiping the One God.

Then Ibrahim cast his gaze to the stars, pondering deeply how to deal with a people who stubbornly persisted in worshiping statues for no reason but to preserve the heritage of their ancestors, even though he had already warned and taught them, as Allah says:

(Remember) when he (Ibrahim) said to his father and his people, "What are these statues you are so devoted to worshiping?" They answered, "We found our forefathers worshiping them." (al-Anbiya'/21: 52-53)

After thinking it over with great seriousness, he resolved to take a dangerous step: to destroy all the statues of worship.

At one point, Ibrahim's people came to invite him to attend their great festival. He declined the invitation gently, on the excuse that his health was troubling him.
Besides avoiding attendance at their great festival, Ibrahim meant to carry out his plan of destroying the statues and to declare open resistance to their worshipers. His people knew nothing of his plan and suspected nothing; nor did his manner betray any dishonesty in his words. And so their festival proceeded without Ibrahim. His excuse of ill health for declining his people's invitation, although he was not in fact ill, is not regarded as the kind of lying religion forbids. That Ibrahim did mislead his people is true. The Messenger of Allah said:

The Prophet Ibrahim did not lie except in three utterances, two of them concerning the essence of Allah, namely his words "I am sick" and "rather, it was this big one that smashed them," and his words about his wife Sarah, "she is my sister." (Riwayat Ahmad and asy-Syaikhan from Abu Hurairah)

Ibrahim's words that his health was troubled, spoken before his people, were in truth meant to avoid his presence at their great festival.

Ibrahim said, "Truly, we — I and my fathers — have been in manifest error." They replied, "Have you come to us in earnest, or are you one of those who jest?" Ibrahim said, "No; your Lord is the Lord of the heavens and the earth, He who created them, and I am one of those who can bear witness to that. And by Allah, I will surely plot against your idols after you have gone away and turned your backs."

During their great festival, Ibrahim used the opportunity to destroy his people's statues. His words that the largest statue had smashed the others were spoken when his people interrogated him over the destruction of the statues. In truth he himself had smashed them, yet he said the largest statue had done it, while his people knew full well that statues can do nothing.

Both of these utterances were spoken in the course of his struggle to uphold the word of tauhid. As for the third utterance, "Sarah is my sister," when she was in fact his wife, it was spoken before the king when the king desired Sarah.

Thus the three utterances Ibrahim spoke are not blameworthy lies in the eyes of religion or of society. The Messenger of Allah explained that the three utterances of Ibrahim are sanctioned by religion, as in his saying:

The Messenger of Allah spoke of Ibrahim's three utterances, saying that none of them was a lie except in matters sanctioned by the religion of Allah. (Riwayat at-Tirmidzi from Abu Sa'id)

When the two of them had surrendered themselves to God and submitted to His every decree, Ismail knelt and pressed his face to the ground, so that Ibrahim could no longer see his son's face. Ismail did this deliberately, so that his father would not look upon his face and could finish his task quickly. Ibrahim drew his knife to slaughter him. At that moment came the voice of the angel sent to Ibrahim, calling from behind him, saying that the purpose of Allah's command given in the dream was already fulfilled by Ismail's being laid down for slaughter. Ibrahim's deed was obedience, pure and sincere, to the command and decree of Allah.
When the angel had delivered that revelation, the two of them rejoiced and gave thanks to Allah, who had granted them the favor and strength of soul to face so heavy a trial. To both of them Allah gave a fitting reward and recompense for the pure and sincere obedience they had shown. They had mastered a father's tenderness solely to honor the command of Allah.

According to a report of Ahmad from Ibnu 'Abbas, when Ibrahim was commanded to perform the rite of sa'i, the devil came to tempt him. The devil tried to race him, but Ibrahim outran him to Jumrah Aqabah. The devil tempted him again, but Ibrahim pelted him with stones seven times until he fled. At the middle jumrah (jumratul wustha) the devil came to tempt him once more, and again Ibrahim pelted him seven times. Then Ibrahim told his son to lay his face down to be slaughtered at once. Ismail was at the time wearing a long white tunic. He said to his father, "O my father, there is no cloth to shroud me but this tunic of mine, so take it off, that you may shroud me in it." Ibrahim began to remove the tunic, but at that instant a voice called from behind him, "O Ibrahim, you have fulfilled your vision truthfully." Ibrahim turned at once, and there before him stood a white ram.

Because they persisted in defiance and even in open challenge, Allah sent down His punishment. In these verses Allah explains that He saved the Prophet Lut and his believing followers and destroyed the defiant, including Lut's wife. The way Allah saved Lut and his followers was to command them to leave the land in the middle of the night, so that before dawn they would already be outside it, as depicted in the verse:

They (the angels) said, "O Lut! We are messengers of your Lord; they will never be able to harm you. So depart with your family in the latter part of the night, and let none of you look back — except your wife. Truly, she will be struck by what strikes them. Truly, their appointed time is the morning. Is not the morning near?" (Hud/11: 81)

When morning came, the promised calamity arrived: the land was turned upside down, so that its disbelievers were buried in the earth. Allah says:

Then when Our decree came, We turned the land of the people of Lut upside down, and We rained upon them, shower upon shower, stones of baked clay, marked by your Lord. And that punishment is never far from the wrongdoers. (Hud/11: 82-83)

The land was also struck by a great storm bearing stones, which destroyed and buried it, as told in another verse:

The people of Lut, too, denied the warning. Truly, We sent against them a storm of stones (that struck them), except the family of Lut; We saved them before dawn. (al-Qamar/54: 33-34)

Because the Prophet Lut, part of his family, and his believing followers were already outside the city, they were all saved. The one not saved was an old woman, his wife. She had chosen to obey her defiant people rather than follow the Prophet Lut.
For that reason she stayed behind in the land, and so was among its victims.

Because of the harshness of his people's resistance to the call to embrace the religion of tauhid, the Prophet Yunus grew angry and warned them that before long a calamity would strike them as punishment from Allah. He then left them, and not long afterward the threat indeed proved true: they saw the signs of the punishment from afar, in the form of thick black clouds. Before the punishment reached them, they went out of their settlement with their wives and children into the desert. There they repented and prayed that Allah would not send down His punishment. Their repentance was accepted by Allah and their prayer answered, as stated in another verse:

Why was there no settlement that believed, so that its faith profited it, except the people of Yunus? When they (the people of Yunus) believed, We removed from them the punishment of disgrace in the life of this world and granted them enjoyment for a time. (Yunus/10: 98)

Meanwhile the Prophet Yunus, in his flight, took passage on a ship heavily laden with goods and passengers. At sea the ship was battered by great waves, which those aboard took as a sign that there was a runaway slave among them who would have to be put off. Since no one would leap into the sea of his own accord, lots were cast by throwing arrows, as was the custom of the people of that time: whoever's arrow struck had lost and must leap into the sea. In the drawing, the arrow that struck was that of the Prophet Yunus. But the passengers would not cast him into the sea by force, out of respect for him. The lot was drawn a second time, and again Yunus lost; a third time, the same. At last the Prophet Yunus himself took off his garment and leapt into the sea.

Allah then commanded an enormous fish to swallow the Prophet Yunus, but not to devour him. In the belly of that great fish Yunus naturally suffered. He felt imprisoned. He felt tormented for having abandoned his people. And then he repented.

In these verses Allah affirms that the Meccan disbelievers, together with the objects of their worship, those statues and idols, will never be able to sway and mislead those who believe. For the foundation of their faith in deifying those statues does not exist, and the same holds for their declaring the angels to be daughters of Allah. The foundation of any article of faith is revelation, and Allah never sent down revelation affirming the truth of idol worship or of the angels as His daughters. Moreover, the faith of those who believe in Allah is strong and will not be shaken by that corrupt creed. If any are swayed, they are themselves prospective inhabitants of hell, people of weak faith. They will later be cast into the hell of Jahim together with those who swayed them.

It is explained that before the coming of the Prophet Muhammad, the Meccan disbelievers had in fact vowed that if only they possessed a scripture containing guidance such as the Jews and Christians possessed, they would believe and carry out the commands written in it with complete obedience. They hoped for the coming of a messenger to lead them to happiness in this world and the hereafter, like the one those communities had.
They wished, too, to experience glory such as those two communities had once enjoyed under their respective prophets, for a people under a prophet's leadership is assured of happiness and glory.

Allah affirms that His decree has held from the beginning concerning His messengers: they are defended by Allah, and His believing servants will prevail. That Allah will aid His messengers is affirmed in another verse:

Truly, We will help Our messengers and those who believe, in the life of this world and on the day the witnesses stand forth (the Day of Judgment). (al-Mu'min/40: 51)

That the messengers of Allah and the believers with them will prevail is affirmed in yet another verse:

Allah has decreed, "I and My messengers will surely prevail." Truly, Allah is Most Strong, Most Mighty. (al-Mujadilah/58: 21)

The proof of that decree is already plain from the experience of earlier communities, whose stories have been read in the preceding verses: the messengers of Allah and the believers with them received His help, while the rebellious among their peoples met destruction. So it is with the Prophet Muhammad: he and his followers will be aided by Allah as His other messengers were, and he and the Muslims will prevail over the Meccan disbelievers, sooner or later.

Allah then commands the Prophet Muhammad to glorify Him, declaring Allah free of every attribute of deficiency and weakness. Allah is Almighty, not weak as the disbelievers imagine Him to be, needing a child and a consort and unable to grant victory to the believers or to send down punishment at once. Because He is free of every deficiency and weakness, He will certainly punish the disbelieving and the wicked and grant happiness to those who believe and do good.

To the messengers and their followers, and especially to the Prophet Muhammad and the Muslim community, Allah gives His greeting of peace: the assurance that they will gain victory in this world and happiness hereafter as dwellers of paradise.

With the destruction of the defiant and their torment in hell, and the victory of the believers and their entry into paradise, Allah shows Himself Most Just and All-Powerful. He grants good reward in the measure of one's goodness and requites evil deeds in the measure of their evil. Thus it is proven that He is praiseworthy and indeed ever deserving of praise.

[Page: Ayat 3, QS As-Saffat (Barisan-Barisan) — translation and tafsir from Kemenag, Baca-Quran.id]

وَتَرَكْنَا عَلَيْهِ فِى الْاٰخِرِيْنَ ۖ

اِلَّا عِبَادَ اللّٰهِ الْمُخْلَصِيْنَ

except the servants of Allah who are purified (from sin).

Truly, he is among Our believing servants.

Allah commands the Prophet Muhammad to ask those who deny the resurrection from the grave which is harder: to create man, including those very deniers, or to create the angels, the heavens, the earth, and all they contain, whose forms are vaster and more various.

Allah commands His Messenger to put this question to them as a rebuke to their obstinacy. In fact they themselves acknowledge that the creation of the heavens, the earth, and all their great contents is harder than the creation of man.
Maka bagaimana mereka dapat mengingkari kebangkitan itu, padahal mereka menyaksikan suatu yang lebih sukar dari apa yang mereka ingkari itu.\n\nFirman Allah:\n\nDan bukankah (Allah) yang menciptakan langit dan bumi, mampu menciptakan kembali yang serupa itu (jasad mereka yang sudah hancur itu)? Benar, dan Dia Maha Pencipta, Maha Mengetahui. (Yasin\u002F36: 81)\n\nDalam ayat lain Allah berfirman:\n\nSungguh, penciptaan langit dan bumi itu lebih besar daripada penciptaan manusia, akan tetapi kebanyakan manusia tidak mengetahui. (al-Mu'min\u002F40: 57) \n\nUntuk menjelaskan perbandingan ini Allah memberikan tambahan penjelasan dengan menyebutkan kejadian nenek moyang mereka, yaitu Adam dari tanah liat. Proses kejadian Adam itu menunjukkan kepada mereka tentang kesederhanaan penciptaannya jika dibandingkan dengan penciptaan alam semesta yang mahabesar ini. Bilamana Allah kuasa menciptakan alam ini, tentulah lebih kuasa lagi menghidupkan kembali anak cucu Adam pada hari Kiamat.\n\nRasulullah kemudian diperingatkan Allah agar jangan terlalu mengharapkan berimannya mereka yang keras kepala. Tidak ada manfaat bagi mereka segala keterangan dan peringatan itu karena mereka tidak tertarik. Bahkan orang-orang kafir itu memperolok-olokkan Rasul, sehingga Rasulullah sendiri merasa heran.\n\nSesungguhnya hati mereka telah tertutup, dan jiwa mereka tidak dapat menjangkau keyakinan yang seperti itu. Mereka tidak mampu lagi melihat keterangan-keterangan dan tanda-tanda yang dapat menunjukkan kebangkitan dari kubur. Bahkan kesombongan dan pembangkangan mereka telah sampai ke puncaknya. Mereka memperolok-olokkan apa yang telah diucapkan oleh Nabi Muhammad saw, dan meremehkan kesungguhan beliau supaya mereka meyakini hari kebangkitan itu.","Allah menunjukkan keingkaran kaum musyrikin terhadap peristiwa-peristiwa pada hari Kiamat. Kejadian-kejadian pada hari Kiamat itu membingungkan akal mereka. Mereka sama sekali tidak dapat mengerti apa yang dikatakan Nabi Muhammad bahwa tulang-belulang yang berserakan dan sudah menjadi tanah dapat dihidupkan kembali. Lebih mengherankan mereka lagi adalah kebangkitan nenek moyang mereka yang sudah lama terkubur dalam bumi, yang tidak ada bekasnya lagi, sehingga dengan demikian nenek moyang mereka itu tidak dapat hidup kembali. Semua ini ditanyakan mereka kepada Nabi saw.","Allah memerintahkan Nabi Muhammad agar menjawab pertanyaan mereka secara tegas bahwa benar mereka dan nenek moyangnya akan dibangkitkan kembali sesudah menjadi tanah. Mereka yang ingkar itu menjadi hina di hadapan Allah Yang Mahatinggi. Sebagaimana Allah berfirman:\n\nSesungguhnya orang-orang yang sombong tidak mau menyembah-Ku akan masuk neraka Jahanam dalam keadaan hina dina. (al-Mu'min\u002F40: 60)\n\nDalam ayat lain Allah berfirman:\n\n¦ Dan semua mereka datang menghadap-Nya dengan merendahkan diri. (an-Naml\u002F27: 87)\n\nTerjadinya hari Kiamat sangatlah mudah bagi Allah. Dengan satu teriakan saja yang ditiupkan dari sangkakala manusia akan bangkit dari kubur dan hidup kembali. Pada waktu itu, mereka akan menyaksikan terlaksananya ancaman Allah.","Pada ayat ini, Allah menjelaskan keluhan orang-orang yang ingkar akan hari Kiamat. Ketika mereka melihat azab yang akan menimpanya, mereka menjadi sadar akan ancaman Allah melalui lisan para rasul dan hukuman yang akan mereka terima pada hari itu atas perbuatannya ketika di dunia. Mereka memperolok-olokkan dan mendustakan para rasul serta mengingkari kebenaran ajaran yang dibawanya. 
Pada hari Kiamat mereka menyesali perbuatan dan kata-kata demikian itu terhadap diri sendiri. Mereka sadar bahwa hari pembalasan sudah datang. \n\nPada hari Kiamat itu akan jelas perbedaan antara orang yang baik dan kebajikan yang dibuatnya dengan orang-orang jelek dengan kejahatan yang dilakukannya.\n\nOrang-orang yang telah berbuat baik akan dimasukkan ke surga Na'im. Sedang orang-orang yang telah berbuat fasik dan durhaka akan dimasukkan ke neraka Saqar. Firman Allah:\n\nDan tahukah kamu apa (neraka) Saqar itu? Ia (Saqar itu) tidak meninggalkan dan tidak membiarkan, yang menghanguskan kulit manusia. (al-Muddatstsir\u002F74: 27-29)","Kemudian pada hari itu diperintahkan kepada malaikat Zabaniyah untuk mengumpulkan orang-orang yang telah berbuat zalim, agar pergi ke tempat hukuman menurut kelompok perbuatan dosa mereka masing-masing, yaitu para pezina sesama pezina, pemakan riba sesama pemakan riba, demikianlah seterusnya. Demikian pula penyembah-penyembah berhala dikumpulkan bersama berhalanya agar mereka tambah merasa malu dan sedih. Lalu mereka digiring menuju neraka Jahim. Allah berfirman:\n\nDan Kami akan mengumpulkan mereka pada hari Kiamat dengan wajah tersungkur, dalam keadaan buta, bisu, dan tuli. Tempat kediaman mereka adalah neraka Jahanam. Setiap kali nyala api Jahanam itu akan padam, Kami tambah lagi nyalanya bagi mereka. (al-Isra'\u002F17: 97)","Pada hari Kiamat terjadi perdebatan antara pemimpin dengan pengikut-pengikutnya. Para pengikut itu melemparkan pertanggungjawaban kepada para pemimpin mereka atas kesesatan dan kekafiran mereka. Mereka menyatakan bahwa para pemimpin itulah yang mencegah mereka berbuat kebaikan, dan menghalang-halangi mereka serta memaksa mereka untuk memeluk keyakinan pemimpin-pemimpin itu. Perbantahan mereka sebagaimana di atas itu dilukiskan oleh Allah dalam firman-Nya:\n\nDan (ingatlah), ketika mereka berbantah-bantahan dalam neraka, maka orang yang lemah berkata kepada orang-orang yang menyombongkan diri, \"Sesungguhnya kami dahulu adalah pengikut-pengikutmu, maka dapatkah kamu melepaskan sebagian (azab) api neraka yang menimpa kami?\" Orang-orang yang menyombongkan diri menjawab, \"Sesungguhnya kita semua sama-sama dalam neraka karena Allah telah menetapkan keputusan antara hamba-hamba-(Nya).\" (al-Mu'min\u002F40: 47-48)","Kemudian Allah menerangkan penolakan pemimpin mereka terhadap tuduhan tersebut. Para pemimpin itu menyatakan bahwa mereka tidak menyesatkan orang itu. Para pengikut sendirilah yang karena tabiatnya, menjadi kafir dan melakukan perbuatan syirik dan maksiat. Mereka mempersekutukan Allah dengan berhala dan patung dan berbuat macam-macam dosa yang menjadikan hatinya tertutup sehingga tidak lagi mengetahui jalan yang benar lagi baik.\n\nSelanjutnya pemimpin-pemimpin itu membantah bahwa mereka memiliki kekuasaan atas pengikut-pengikutnya itu, menyesatkan dan mengkafirkannya serta tidak pernah menghalangi mereka menentukan pilihan, mana perbuatan yang buruk dan mana perbuatan yang baik. Tetapi kecenderungan pengikut-pengikut itu sendiri yang menyebabkan mereka berbuat kekafiran dan kemaksiatan.","Pada hari Kiamat penyembah-penyembah berhala itu mengakui bahwa mereka dulunya bersikap melampaui batas karena pembawaan dan tabiat mereka sendiri yang cenderung kepada kekafiran dan kejahatan. Maka sepatutnyalah bilamana pada hari Kiamat itu mereka menerima hukuman dari Allah.\n\nBalasan baik atau buruk terhadap suatu perbuatan adalah akibat yang wajar, karena perbuatan itu dilakukan dengan penuh kesadaran. 
Maka masing-masing orang tidaklah perlu menyalahkan orang lain, kecuali kepada dirinya sendiri. Tidaklah wajar bila satu golongan lain saling menyalahkan. Masing-masing seharusnya menerima balasan atas perbuatannya. Mereka yang taat kepada Allah dan Rasul-Nya mendapat pahala dunia dan akhirat, dan mereka yang sesat akan masuk neraka. Demikian janji Tuhan yang disampaikan kepada manusia melalui rasul-rasul-Nya. Penyembah-penyembah berhala teman-teman setan mengetahui janji Tuhan itu namun mereka berpaling juga dari kebaikan dan ketaatan.\n\nGolongan pemimpin-pemimpin pada waktu itu menyatakan bahwa merekalah yang menyesatkan pengikut-pengikutnya itu. Mereka berbuat demikian karena keinginan mereka agar pengikut-pengikut itu mengikuti jejak mereka. Namun sesungguhnya tabiat dan usaha-usaha pengikut-pengikut itu sendirilah yang menyebabkan mereka berbuat kekafiran dan durhaka sehingga dengan demikian mereka menderita azab seperti diperingatkan sebelumnya oleh para rasul.","Pada ayat ini Allah menegaskan bahwa azab ditimpakan kepada pemimpin-pemimpin dan pengikut-pengikutnya. Kedua golongan itu saling menuduh dan melempar tanggung jawab, namun mereka sama-sama dalam kesesatan. Yang menyesatkan tentulah menerima hukuman lebih berat. Mereka tidak hanya menanggung beban mereka sendiri, tetapi juga harus menanggung beban orang-orang yang mereka sesatkan.\n\nHukuman yang dijatuhkan Tuhan kepada kaum musyrikin itu sesuai dengan keadilan Tuhan terhadap hamba-hamba-Nya. Semua orang yang berdosa akan mendapat hukuman sesuai dengan kejahatannya. Demikian pula orang yang berbuat kebaikan akan diberi balasan sesuai dengan kebaikannya.","Kemudian Allah menguraikan sebagian penyebab hukuman yang ditimpakan kepada orang-orang yang berdosa itu. Sewaktu di dunia mereka menolak ajaran tauhid ketika disampaikan kepada mereka dan berpaling tidak mau mendengarkan bacaan kalimat tauhid \"La ilaha illallah\" yang artinya, \"tidak ada Tuhan yang patut disembah kecuali Allah\". Alasan penolakan mereka ialah kemustahilan bagi mereka meninggalkan sembahan-sembahan nenek moyangnya.\n\nMereka mewarisi tradisi penyembahan berhala dan patung secara turun-temurun. Menurut mereka hal itu suatu kebenaran yang terus-menerus harus dipegang. Keyakinan itu tidak akan ditinggalkan hanya untuk mendengarkan perkataan seseorang penyair gila yang tidak patut didengarkan pembicaraannya dan tidak perlu pula didengar ajaran-ajarannya. Perkataan Nabi menurut mereka penuh dengan khayalan.\n\nPernyataan orang kafir yang diucapkan di hadapan Nabi sewaktu hidup di dunia dengan penuh kesombongan, menunjukkan bahwa mereka mengingkari keesaan Allah, dan mengingkari kerasulan Muhammad saw. Keingkaran pertama ialah penolakan dengan sombong mendengarkan ajaran tauhid dan keingkaran kedua, pernyataan ketidakmungkinan meninggalkan sembahan-sembahan itu untuk mematuhi Rasul yang dituduhnya seorang yang gila.","Allah menceritakan kenikmatan yang diberikan kepada kaum yang taat kepada Allah dan rasul-Nya. Mereka dengan penuh keikhlasan melakukan amal kebajikan, menjauhi segala bentuk kemaksiatan dan kemungkaran, bersih dari dosa selalu memanjatkan doa dan harapan kepada Tuhan mereka. Itulah hamba-hamba Allah yang ikhlas, yang akan mendapatkan surga, sebagaimana firman Allah:\n\nSungguh, Kami telah menciptakan manusia dalam bentuk yang sebaik-baiknya, kemudian Kami kembalikan dia ke tempat yang serendah-rendahnya, kecuali orang-orang yang beriman dan mengerjakan kebajikan; maka mereka akan mendapat pahala yang tidak ada putus-putusnya. 
(at-Tin\u002F95: 4-6)\n\nDan firman Allah:\n\nDemi masa, sungguh, manusia berada dalam kerugian, kecuali orang-orang yang beriman dan mengerjakan kebajikan serta saling menasihati untuk kebenaran dan saling menasihati untuk kesabaran. (al-'A.shr\u002F103: 1-3)\n\nGolongan hamba Allah yang ikhlas itu, tidak akan merasakan azab, tidak akan ditanya pada hari hisab, bahkan mereka mungkin diampuni kesalahannya jika ada kesalahan, dan diberi ganjaran pahala sepuluh kali lipat dari tiap amal saleh yang dikerjakannya atau lebih besar dari itu dengan kehendak Allah.\n\nKepada mereka inilah Allah memberikan rezeki yang telah ditentukan yakni buah-buahan yang beraneka ragam harum baunya dan rasanya amat lezat sehingga membangkitkan selera untuk menikmatinya. Mereka hidup mulia serta mendapat pelayanan dan penghormatan.\n\nDari ayat-ayat di atas, dapat dipahami bahwa makanan di surga itu disediakan untuk kenikmatan dan kesenangan.\n\n(43-44) Pada ayat ini, Allah menjelaskan lebih lanjut hamba-hamba Allah yang beriman dan beramal saleh dan surga yang penuh nikmat yang mempunyai tempat-tempat yang tinggi yang di bawahnya terdapat sungai-sungai yang mengalir, sebagaimana dijelaskan Allah dalam firman-Nya:\n\nDan orang-orang yang beriman dan mengerjakan kebajikan, sungguh, mereka akan Kami tempatkan pada tempat-tempat yang tinggi (di dalam surga), yang mengalir di bawahnya sungai-sungai, mereka kekal di dalamnya. Itulah sebaik-baik balasan bagi orang yang berbuat kebajikan. (al-'Ankabut\u002F29: 58)\n\nAhli surga itu duduk di atas kursi yang megah berhadap-hadapan satu sama lain agar saling mengenal dan mereka berbincang-bincang tentang hal-hal yang menyenangkan, yang memberikan mereka kepuasan rohani dan jasmani sebagaimana diterangkan Allah dengan firman-Nya:\n\nDan sebagian mereka berhadap-hadapan satu sama lain saling bertegur sapa. (ath-thur\u002F52: 25)","Pada ayat ini, Allah menjelaskan lebih lanjut hamba-hamba Allah yang beriman dan beramal saleh dan surga yang penuh nikmat yang mempunyai tempat-tempat yang tinggi yang di bawahnya terdapat sungai-sungai yang mengalir, sebagaimana dijelaskan Allah dalam firman-Nya:\n\nDan orang-orang yang beriman dan mengerjakan kebajikan, sungguh, mereka akan Kami tempatkan pada tempat-tempat yang tinggi (di dalam surga), yang mengalir di bawahnya sungai-sungai, mereka kekal di dalamnya. Itulah sebaik-baik balasan bagi orang yang berbuat kebajikan. (al-'Ankabut\u002F29: 58)\n\nAhli surga itu duduk di atas kursi yang megah berhadap-hadapan satu sama lain agar saling mengenal dan mereka berbincang-bincang tentang hal-hal yang menyenangkan, yang memberikan mereka kepuasan rohani dan jasmani sebagaimana diterangkan Allah dengan firman-Nya:\n\nDan sebagian mereka berhadap-hadapan satu sama lain saling bertegur sapa. (ath-thur\u002F52: 25)","Kemudian Allah menyebutkan lagi dalam ayat ini kecantikan istri ahli-ahli surga sebagai penyempurnaan terhadap nikmat yang diberikan Tuhan kepada mereka di akhirat. Istri-istri mereka itu merupakan bidadari-bidadari yang cantik, tidak suka melihat orang-orang yang bukan suaminya, matanya jeli, kulitnya putih kuning bersih seperti warna telur burung unta yang belum pernah disentuh orang-orang dan belum dikotori debu. Warna kulit perempuan demikian sangat disenangi oleh orang Arab.\n\nPada ayat yang lain digambarkan para bidadari itu bagaikan mutiara. Firman Allah:\n\nDan ada bidadari-bidadari yang bermata indah, laksana mutiara yang tersimpan baik. 
(al-Waqi'ah\u002F56: 22-23)","Pada ayat ini Allah memperingatkan kepada orang-orang kafir tentang azab yang mereka alami di neraka. Kepada mereka dikemukakan pertanyaan tentang manakah hidangan yang lebih baik apakah rezeki yang diberikan kepada penghuni surga sebagaimana telah disebutkan di atas ataukah buah pohon zaqqum yang pahit lagi menjijikkan yang disediakan bagi mereka.\n\nPertanyaan itu adalah sebagai ejekan kepada mereka. Namun kemudian mereka mempertanyakan tentang pohon zaqqum. Mungkinkah dia tumbuh dalam neraka, padahal neraka itu membakar segalanya. Bagi mereka pohon zaqqum itu merupakan ujian dan cobaan dan di akhirat akan dijadikan bahan siksaan. Allah berfirman:\n\nDan Kami tidak menjadikan mimpi yang telah Kami perlihatkan kepadamu, melainkan sebagai ujian bagi manusia dan (begitu pula) pohon yang terkutuk (zaqqum) dalam Al-Qur'an. Dan Kami menakut-nakuti mereka, tetapi yang demikian itu hanyalah menambah besar kedurhakaan mereka. (al-Isra'\u002F17: 60)","Allah menegaskan bahwa pohon zaqqum itu tumbuh dari dasar neraka yang menyala-nyala. Dahan-dahannya menjulang tinggi, setinggi nyala api neraka. Pohon itu tumbuh dari dalam api dan dari api pula dia dijadikan. Bayangannya seperti kepala setan, sangat buruk dan menjijikkan. Orang Arab dalam menggambarkan sesuatu yang sangat buruk dan menjijikkan mengumpamakannya dengan setan, misalnya seperti kepala setan. Akan tetapi, sebenarnya wujud setan itu tidak ada yang mengetahui.\n\nHanya saja khayalan manusia menggambarkannya sangat buruk. Sebaliknya dalam menggambarkan sesuatu yang indah, mereka mengumpamakannya dengan malaikat. Karena itu Tuhan mempergunakan kata malaikat dalam menggambarkan ketampanan Yusuf dalam firman-Nya:\n\n¦Ini bukanlah manusia. Ini benar-benar malaikat yang mulia. (Yusuf\u002F12: 13)","Pada ayat ini Allah menerangkan sebab orang-orang kafir itu terjerumus ke dalam penderitaan azab yang sangat berat. Yaitu bahwa mereka sesudah mendengar seruan yang disampaikan Nabi Muhammad saw, benar-benar mengetahui dan menyadari kesesatan nenek moyang mereka tanpa mengindahkan peringatan Rasulullah saw. Mereka terlalu terburu-buru dan fanatik mengikuti nenek moyang sehingga pikiran yang sehat dikesampingkan, seolah-olah mereka tidak sempat merenungkan peringatan-peringatan Rasul.\n\nKelakuan demikian itu sangat tercela karena tidak saja merugikan bagi pelakunya tetapi juga generasi-generasi yang hidup berikutnya. Kemunduran dan kehancuran akan menimpa umat, bilamana daya berpikir dan berprakarsa tidak berkembang pada mereka. Kebahagiaan akan dapat dicapai bilamana umat itu terus-menerus mengembangkan daya berpikir mereka dengan pengamatan dan penelitian kehidupan spiritual dan material.","Ayat ini mengisahkan bahwa Nabi Ibrahim dalam perantauan memohon kepada Tuhan agar dianugerahi seorang anak yang saleh dan taat serta dapat menolongnya dalam menyampaikan dakwah dan mendampinginya dalam perjalanan dan menjadi kawan dalam kesepian.\n\nKehadiran anak itu sebagai pengganti dari keluarga dan kaumnya yang ditinggalkannya. Permohonan Nabi Ibrahim ini diperkenankan oleh Allah. Kepadanya disampaikan berita gembira bahwa Allah akan menganugerahkan kepadanya seorang anak laki-laki yang punya sifat sangat sabar.\n\nSifat sabar itu muncul pada waktu balig. Karena pada masa kanak-kanak sedikit sekali didapati sifat-sifat seperti sabar, tabah, dan lapang dada. Anak remaja itu ialah Ismail, anak laki-laki pertama dari Ibrahim, ibunya bernama Hajar istri kedua dari Ibrahim. 
Putra kedua ialah Ishak, lahir kemudian sesudah Ismail dari istri pertama Ibrahim yaitu Sarah.","Pada ayat ini ditegaskan bahwa apa yang dialami Ibrahim dan putranya itu merupakan batu ujian yang amat berat. Memang hak Allah untuk menguji hamba yang dikehendaki-Nya dengan bentuk ujian yang dipilih-Nya berupa beban dan kewajiban yang berat. Bila ujian itu telah ditetapkan, tidak seorang pun yang dapat menolak dan menghindarinya. Di balik cobaan-cobaan yang berat itu, tentu terdapat hikmah dan rahasia yang tidak terjangkau oleh pikiran manusia.\n\nIsmail yang semula dijadikan kurban untuk menguji ketaatan Ibrahim, diganti Allah dengan seekor domba besar yang putih bersih dan tidak ada cacatnya. Peristiwa penyembelihan kambing oleh Nabi Ibrahim ini yang menjadi dasar ibadah kurban untuk mendekatkan diri kepada Allah, dilanjutkan oleh syariat Nabi Muhammad. Ibadah kurban ini dilaksanakan pada hari raya haji\u002Fraya kurban atau pada hari-hari tasyriq, yakni tiga hari berturut-turut sesudah hari raya kurban, tanggal 11, 12, 13 Zulhijah.\n\nHewan kurban terdiri dari binatang-binatang ternak seperti unta, sapi, kerbau, dan kambing. Diisyaratkan binatang kurban itu tidak cacat badannya, tidak sakit, dan cukup umur. Menyembelih binatang untuk kurban ini hukumnya sunnah muakkadah(sunah yang ditekankan).\n\nFirman Allah:\n\nMaka laksanakanlah salat karena Tuhanmu, dan berkurbanlah (sebagai ibadah dan mendekatkan diri kepada Allah). (al-Kautsar\u002F108: 2)\n\nDengan disyariatkannya ibadah kurban dalam agama Islam, maka peristiwa Ibrahim menyembelih anaknya akan tetap dikenang selama-lamanya dan diikuti oleh umatnya. Ibadah kurban juga menyemarakkan agama Islam karena daging-daging kurban itu dibagi-bagikan kepada masyarakat terutama kepada fakir miskin.","Pada ayat ini, Allah menjelaskan enam nikmat yang telah diberikan kepada Musa dan Harun. Nikmat-nikmat itu ialah\n\nPertama, Musa, Harun, dan kaumnya diselamatkan dari bencana yang besar. Sejak lama, orang Israil hidup di Mesir di bawah kekuasaan Fir'aun. Mereka disuruh melakukan pekerjaan yang berat dengan paksa dan diperlakukan sebagai budak belian. Bahkan anak laki-laki mereka banyak yang dibunuh dan anak-anak perempuan dibiarkan hidup atas perintah dan ramalan dukun-dukun yang mengelilingi Fir'aun. Hampir saja mereka mengalami kemusnahan, jika Musa dan Harun tidak datang menyelamatkan mereka.\n\nKedua, di samping tertolongnya mereka dari kejaran Fir'aun, bahkan Firaun tenggelam di dasar laut, Bani Israil berhasil pula mengalahkan musuh-musuh lainnya, dan merebut kembali negeri-negeri mereka. Mereka kembali dapat mengumpulkan harta kekayaan yang mereka peroleh sepanjang hidup, menjadi bangsa yang kuat, serta memiliki kekuatan dan kekuasaan hingga memiliki negara yang besar seperti zaman raja Talut dan Daud. Firman Allah:\n\nMaka mereka mengalahkannya dengan izin Allah, dan Daud membunuh Jalut. Kemudian Allah memberinya (Daud) kerajaan, dan hikmah, dan mengajarinya apa yang Dia kehendaki¦. (al-Baqarah\u002F2: 251)","Dua ayat ini menjelaskan nikmat yang diberikan Allah kepada Bani Israil. Dua macam nikmat yang lalu merupakan kenikmatan lahiriah maka dua macam berikut ini kenikmatan batiniah, yakni dua macam anugerah Tuhan yang menyelamatkan dan meningkatkan jiwa dan akhlak mereka.\n\nKetiga, Allah memberikan kepada Musa dan Harun kitab Taurat yang sangat jelas dan memuat ketentuan-ketentuan dan petunjuk baik untuk kebahagiaan hidup di dunia maupun akhirat. 
Allah berfirman:\n\nDan sungguh, Kami telah memberikan kepada Musa dan Harun, Furqan (Kitab Taurat) dan penerangan serta pelajaran bagi orang-orang yang bertakwa. (al-Anbiya'\u002F21: 48)\n\nKitab ini diwariskan kepada Bani Israil untuk dijadikan pegangan hidup mereka. Firman Allah:\n\nDan sungguh, Kami telah memberikan petunjuk kepada Musa; dan mewariskan Kitab (Taurat) kepada Bani Israil untuk menjadi petunjuk dan peringatan bagi orang-orang yang berpikiran sehat. (al-Mu'min\u002F40: 53-54)\n\nKeempat, Allah menunjukkan jalan kebenaran kepada keduanya untuk menuju kepada kebahagiaan yang hakiki. Dengan akal pikiran, keduanya menjalankan dan mengikuti petunjuk-petunjuk Ilahi, baik dalam bidang akidah maupun muamalah, dan Allah masih menganugerahkan kepada mereka taufik dan perlindungan-Nya.","Kemudian Allah menerangkan kenikmatan lain yang merupakan kemuliaan yang diberikan-Nya kepada Musa dan Harun, sebagaimana yang diberikan Allah kepada Nuh dan Ibrahim. Kemuliaan itu ialah:\n\nAllah mengabadikan sebutan keharuman nama keduanya yang mengharumkan di kalangan para nabi dan umat manusia sepanjang masa. Begitu juga dengan pujian dan doa terus diberikan kepadanya.\n\nAllah menyebutkan salam sejahtera bagi Musa dan Harun agar para malaikat, jin, dan manusia menyebutkan salam juga bagi keduanya. Dengan ucapan salam sejahtera itu maka nama mereka akan tetap harum selama-lamanya.","Dua ayat ini menjelaskan bahwa kenikmatan yang besar tersebut di atas seperti kemenangan atas musuh-musuh, petunjuk-petunjuk Tuhan, kemuliaan-kemuliaan, dan sebagainya adalah berkat amal kebajikan yang mereka lakukan, dan pengorbanan serta penderitaan mereka dalam memperjuangkan penegakan agama tauhid. Jadi begitulah Allah memberikan pembalasan pahala dunia-akhirat atas orang-orang yang berbuat kebaikan untuk kemaslahatan sesama umat manusia.\n\nYang mendorong keduanya mengerjakan amal-amal kebajikan dan bersedia mengalami penderitaan adalah iman yang bersemi dalam dada mereka. Dari landasan iman yang kuat lahirlah perbuatan-perbuatan yang mulia, itulah sebabnya Allah menegaskan bahwa keduanya adalah hamba-hamba Allah yang beriman.","Ayat ini menerangkan bahwa kaum Nabi Ilyas menentang Nabi Ilyas. Mereka memandang Nabi Ilyas berbohong dengan dakwah yang disampaikannya. Oleh karena itu, mereka menolak untuk kembali kepada agama tauhid. Karena tetap memilih syirik dan tidak kembali ke agama tauhid itu, maka selama di dunia mereka dibiarkan, tetapi di akhirat nanti mereka akan diseret dengan paksa ke dalam neraka. \n\nMereka yang mengerjakan kebaikan dengan ikhlas dihindarkan dari neraka. Mereka disebut al-mukhlishin 'orang yang ikhlas. Setelah keikhlasan mereka dalam beramal begitu kuatnya sehingga sudah menjadi sifatnya, maka Allah menyambut keikhlasan itu sehingga ia dijadikan-Nya sebagai orang yang telah diterima sepenuhnya keikhlasannya. Orang itu disebut al-mukhlashin 'orang yang diikhlaskan-Nya. Dalam Al-Qur'an orang itulah yang tidak mempan digoda oleh setan sebagaimana diakui setan itu sendiri:\n\nIa (Iblis) berkata, \"Tuhanku, oleh karena Engkau telah memutuskan bahwa aku sesat, aku pasti akan jadikan (kejahatan) terasa indah bagi mereka di bumi, dan aku akan menyesatkan mereka semuanya, kecuali hamba-hamba-Mu yang terpilih di antara mereka.\" (al-hijr\u002F15: 39-40)","Pada ayat ini Allah swt mengarahkan sapaan-Nya kepada kaum kafir Mekah, bahwa mereka setiap saat lewat di negeri Sodom yang telah dihancurkan dan sebagiannya tinggal puing-puing itu, karena letaknya di jalur perdagangan antara Mekah dan Syria. 
Jalur itu sering dilewati kafilah-kafilah dagang mereka. Mereka melewatinya pagi hari atau sore hari. Dari puing-puing itu mereka dapat memperkirakan bagaimana kedahsyatan peristiwa itu. Seharusnya mereka, dan siapa pun sesudah itu, mengambil pelajaran dari peristiwa tersebut dan beriman sebagaimana dinyatakan dalam ayat berikut:\n\nDan sungguh, (negeri) itu benar-benar terletak di jalan yang masih tetap (dilalui manusia). Sungguh, pada yang demikian itu benar-benar terdapat tanda (kekuasaan Allah) bagi orang yang beriman. (al-hijr\u002F15: 76-77)\n\nTetapi mengapa mereka tidak juga mengambil pelajaran dari peristiwa itu dan mengapa mereka tidak juga mau beriman.","Dalam tobatnya ia banyak bertasbih mensucikan Allah dan berdoa. Bunyi tasbih yang terus diulang-ulang Nabi Yunus dicantumkan dalam Surah al-Anbiya'\u002F21: 87: \n\n¦Maka dia berdoa dalam keadaan yang sangat gelap, \"Tidak ada tuhan selain Engkau, Mahasuci Engkau. Sungguh, aku termasuk orang-orang yang zalim.\" (al-Anbiya'\u002F21: 87)\n\nDalam tasbihnya itu, Nabi Yunus mengakui dengan sebenar-benarnya bahwa Tuhan hanyalah Allah. Allah Mahasuci dari segala kekurangan dan sifat-sifat yang tidak pantas bagi-Nya. Dan mengakui bahwa ia telah berbuat salah. Di dalam pengakuan-pengakuan itu terselip doa yang tulus agar ia dilepaskan dari siksaan terpenjara dalam perut ikan itu.\n\nAllah menegaskan bahwa bila ia tidak bertasbih dan berdoa seperti itu, maka ia akan menghuni perut ikan itu sampai hari Kiamat. Karena tasbih dan doanya itulah maka Allah melepaskannya dari dalam perut ikan tersebut, sebagaimana dinyatakan dalam ayat lain:\n\nMaka Kami kabulkan (doa)nya dan Kami selamatkan dia dari kedukaan. Dan demikianlah Kami menyelamatkan orang-orang yang beriman. (al-Anbiya'\u002F21: 88)","Setelah satu, atau tiga, atau beberapa hari, menurut beberapa pendapat, Nabi Yunus berada di dalam perut ikan besar itu, Allah memerintahkan ikan tersebut memuntahkannya ke suatu daerah tandus tidak ditumbuhi tanaman apapun. Karena beberapa saat berada di dalam perut ikan, kondisi Nabi Yunus lemah sekali. Untuk menyelamatkannya dari terpaan panas matahari Allah menumbuhkan pohon yaqthin (sejenis labu) di sampingnya. Daun pohon itu melindunginya dan buahnya jadi makanannya.","Setelah kesehatan Nabi Yunus pulih, Allah mengutusnya kembali kepada kaumnya yang pada waktu itu jumlahnya sudah sampai seratus ribu orang lebih. Kedatangannya mereka sambut dengan baik karena mereka sadar bahwa dahulu mereka telah mengecewakannya sehingga ia meninggalkan mereka. Mereka menyadari telah memperoleh kasih sayang Allah, karena mereka baru beriman ketika tanda-tanda azab Allah telah menghadang mereka. Pada umat-umat yang lalu, iman di saat seperti itu tidak diterima. Hanya umat Nabi Yunus yang dikecualikan dari ketentuan itu, sebagaimana dinyatakan dalam Surah Yunus\u002F10:98 yang sudah diterangkan di atas. Mereka kemudian hidup bahagia dan sentosa sampai waktu yang ditetapkan bagi mereka.","Selanjutnya Allah mengecam lebih keras lagi ucapan atau pandangan mereka bahwa Allah punya anak itu. Allah menegaskan bahwa pandangan mereka itu hanyalah suatu kebohongan besar yang direkayasa. Karena rekayasa seperti itu maka Allah mencap mereka sebagai pembohong-pembohong besar. Untuk mempertegas kecaman terhadap kebohongan mereka itu, Allah bertanya, \"Apakah Ia memilih anak perempuan daripada anak laki-laki?\" Maksudnya: anak perempuan rendah dalam pandangan mereka, dan anak laki-laki mulia, lalu apakah Allah akan memilih anak perempuan dan untuk mereka anak laki-laki? 
Bila demikian keadaannya berarti Allah bodoh dan mereka pintar. Pandangan itulah yang dikecam Allah, karena Allah tidak mungkin beranak dan tidak memerlukan anak, dan tidak boleh dilecehkan dengan pandangan seperti itu, bahwa untuk Allah cukup anak perempuan sedangkan untuk mereka anak laki-laki. Mereka harus mempertanggungjawabkan dosa besar karena pandangan yang keliru itu dan dosa orang-orang yang mengikutinya. Firman Allah:\n\nMaka apakah pantas Tuhan memilihkan anak laki-laki untukmu dan Dia mengambil anak perempuan dari malaikat? Sungguh, kamu benar-benar mengucapkan kata yang besar (dosanya). (al-Isra'\u002F17: 40)","Kecaman dilanjutkan lagi dengan pertanyaan, \"Bagaimana kalian ini? Bagaimana kalian berpendapat demikian?\" Mereka dikecam karena tidak punya pikiran yang sehat, karena bagaimana mungkin Allah yang menciptakan segala sesuatu di alam ini butuh seorang anak dan anak itu perempuan. Mereka dikecam pula karena, seandainya mereka punya pikiran, mereka keliru dalam berpikir sehingga pikiran itu tidak logis dan tidak dapat diterima akal.\n\nSelanjutnya mereka dikecam bahwa sebenarnya mereka tidak menggunakan pikirannya untuk menganalisa ayat-ayat Allah yang disampaikan, dan tidak mereka ambil menjadi pelajaran padahal hal itu berguna. Kaum kafir Mekah itu sudah mengetahui tentang umat-umat terdahulu, tetapi tidak mengambil hikmah dan pelajaran dari pengalaman umat-umat terdahulu sehingga mereka beriman.","Bantahan lebih lanjut yang disampaikan Allah untuk membantah pandangan kaum kafir Mekah bahwa Allah punya anak yaitu malaikat sebagai anak perempuan-Nya, Allah meminta mereka mengemukakan bukti nyata yang tidak dapat dibantah kebenarannya, baik bukti itu berbentuk fisik maupun berbentuk ungkapan yang terjamin kebenarannya. Bukti fisik, misalnya, bahwa Allah melahirkan malaikat. Bukti non-fisik adalah wahyu. Tentu saja mereka tidak akan bisa mengemukakan bukti-bukti itu, karena memang tidak ada. Dengan demikian firman-Nya berbentuk pertanyaan, \"Atau apakah kalian memiliki bukti yang nyata?\" merupakan sanggahan yang jitu terhadap pandangan mereka bahwa Allah punya anak perempuan tersebut.\n\nApalagi setelah itu Allah meminta mereka menyampaikan kitab suci yang berisi pernyataan bahwa malaikat itu adalah anak-Nya. Kitab suci itu tidak mungkin mereka dapatkan karena Allah tidak pernah menurunkannya. Pada ayat lain Allah berfirman yang isinya sama dengan ayat ini:\n\nAtau pernahkah Kami menurunkan kepada mereka keterangan, yang menjelaskan (membenarkan) apa yang (selalu) mereka persekutukan dengan Tuhan? (ar-Rum\u002F30: 35)","Selanjutnya Allah menegaskan bahwa Ia Mahasuci dari segala anggapan dan pandangan seperti itu, bahwa Ia punya anak perempuan yaitu malaikat dan bahwa antara Ia dan jin ada hubungan kekerabatan. Bahkan Ia Mahasuci dari apa pun pandangan manusia mengenai diri-Nya, karena keadaan-Nya yang sebenarnya tidak dapat dilukiskan manusia dengan sebenar-benarnya, karena Ia tidak akan dapat ditangkap mata, tidak dapat didengar telinga, dan tidak tergores di dalam hati. Orang yang berpandangan demikian adalah musyrik. \n\nHamba-hamba Allah yang terpilih, yaitu yang telah dijadikan-Nya memiliki sifat ikhlas, tidak akan mempunyai pandangan yang salah tentang-Nya. Mereka selalu mengagungkan-Nya sejauh yang ia mampu mengagungkan-Nya, memuji-Nya sejauh yang ia mampu memuji-Nya, dan melaksanakan perintah-Nya dengan patuh sejauh yang ia mampu melaksanakannya. Begitu pulalah malaikat dalam pandangan mereka. 
Malaikat bukanlah anak perempuan Allah, tetapi adalah hamba Allah yang selalu menghambakan diri kepada-Nya dan melaksanakan perintah-Nya tanpa pamrih sedikit pun.","Untuk mewujudkan kemenangan itu, Allah meminta Nabi Muhammad agar berpaling dari mereka. Maksudnya yaitu menunjukkan sikap tidak suka pada sikap pembangkangan mereka, tidak menghiraukan ancaman mereka, dan melanjutkan dakwah pada mereka dengan penuh tawakal kepada Allah, sebagaimana diperintahkan Allah dalam ayat lain:\n\nDan janganlah engkau (Muhammad) menuruti orang-orang kafir dan orang-orang munafik itu, janganlah engkau hiraukan gangguan mereka dan bertawakallah kepada Allah. Dan cukuplah Allah sebagai pelindung. (al-A.hzab\u002F33: 48) \n\nDi samping diperintahkan berpaling, Nabi Muhammad juga diperintahkan untuk melihat perkembangan selanjutnya, yaitu menunggu, karena pertolongan Allah pasti datang. Pertolongan itu adalah takluknya kota Mekah, sebagaimana dinyatakan ayat berikut:\n\nApabila telah datang pertolongan Allah dan kemenangan, dan engkau melihat manusia berbondong-bondong masuk agama Allah, maka bertasbihlah dalam dengan Tuhanmu dan mohonlah ampunan kepada-Nya. Sungguh, Dia Maha Penerima tobat. (an-Nashr\u002F110: 1-3)\n\nMereka juga akan melihat perkembangan dan menunggu. Tetapi yang mereka tunggu hanyalah kekalahan.","Setelah orang-orang kafir itu diancam kekalahan di dunia, supaya mereka beriman, mereka diancam dengan azab akhirat. Karena keingkaran atau karena tidak percaya adanya azab akhirat itu, mereka menantang Nabi saw agar menyegerakan terjadinya azab akhirat itu waktu di dunia ini juga. Untuk menjawab tantangan itu, Allah bertanya apakah betul-betul mereka menginginkan azab akhirat itu disegerakan. Allah menyatakan bahwa bila azab akhirat itu disegerakan dan diturunkan ke halaman rumah mereka, maka malapetaka yang menimpa akan tak terkirakan. Yaitu datangnya malapetaka itu pada pagi hari, yakni di saat orang-orang yang diancam itu masih ingin menambah tidurnya menjelang matahari terbit, sehingga mereka belum siap menghadapinya. \n\nHebatnya malapetaka pagi hari dapat diambil contohnya dari serangan Nabi saw terhadap Khaibar di waktu subuh yang mengakibatkan jatuhnya benteng itu:\n\nDari Anas r.a. bahwa ia berkata, \"Rasulullah pada pagi hari berada di Khaibar. Ketika mereka(Yahudi penduduk Khaibar) keluar dengan kampak dan tombak mereka, dan melihat pasukan, mereka lari dan berteriak, 'Muhammad, demi Allah, Muhammad, dan pasukannya!\" Nabi berkata, 'Allah Mahaagung, Khaibar hancur. Kita bila sampai di halaman mereka, itu adalah subuh yang jelek sekali bagi orang-orang yang diancam itu.\" (Riwayat al-Bukhari dan Muslim).","Menghadapi tantangan kaum kafir agar azab akhirat disegerakan bagi mereka, Allah memerintahkan Nabi untuk berpaling, yaitu menunjukkan sikap tidak suka pada sikap pembangkangan mereka, tidak menghiraukan ancaman mereka, dan melanjutkan dakwah kepada mereka dengan penuh tawakal kepada Allah, dan melihat perkembangan selanjutnya, yaitu menunggu. Untuk itu diperlukan sikap sabar dan tawakal sebagaimana sikap yang lalu pada waktu menunggu kehancuran mereka di dunia. Dengan demikian azab akhirat itu pasti mereka terima.","https:\u002F\u002Fschema.org","https:\u002F\u002Fwww.baca-quran.id\u002F37\u002F3\u002F","2021-08-01T10:39:54.934Z")));
// Re-export the InputRating component as this module's default export.
export { default } from './InputRating.jsx';
/*
 * A-Frame.
 * This build of A-Frame is based on 0.6.1; it accepts glTF 2.0 files
 * and fixes the camera position bug.
 */
require('aframe');

/*
 * A-Frame components.
 */
require('aframe-template-component');
require('@gladeye/aframe-preloader-component');

/*!
 * Bootstrap v3.3.7 (http://getbootstrap.com)
 * Copyright 2011-2016 Twitter, Inc.
 * Licensed under the MIT license
 */
require('bootstrap-sass/assets/javascripts/bootstrap/modal.js');
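// Usage sketch (not part of the original file): one plausible way to wire the
// modules required above together. The element ids are hypothetical; it assumes
// the page markup defines an <a-scene> and a Bootstrap modal with id
// "loading-modal", and that jQuery is available globally as the bootstrap-sass
// modal plugin requires.
$(function () {
    // Keep a non-dismissable modal open while the scene initializes.
    $('#loading-modal').modal({ backdrop: 'static', keyboard: false });

    var scene = document.querySelector('a-scene');
    if (scene) {
        // A-Frame scenes emit "loaded" once all their entities have initialized.
        scene.addEventListener('loaded', function () {
            $('#loading-modal').modal('hide');
        });
    }
});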
/**
 * Turf is a modular geospatial analysis engine written in JavaScript. It performs geospatial
 * processing tasks with GeoJSON data and can be run on a server or in a browser.
 *
 * @module turf
 * @summary Geospatial analysis for JavaScript
 */
export {default as isolines} from '@turf/isolines';
export {default as convex} from '@turf/convex';
export {default as pointsWithinPolygon} from '@turf/points-within-polygon';
export {default as concave} from '@turf/concave';
export {default as collect} from '@turf/collect';
export {default as flip} from '@turf/flip';
export {default as simplify} from '@turf/simplify';
export {default as bezierSpline} from '@turf/bezier-spline';
export {default as tag} from '@turf/tag';
export {default as sample} from '@turf/sample';
export {default as envelope} from '@turf/envelope';
export {default as square} from '@turf/square';
export {default as circle} from '@turf/circle';
export {default as midpoint} from '@turf/midpoint';
export {default as center} from '@turf/center';
export {default as centerOfMass} from '@turf/center-of-mass';
export {default as centroid} from '@turf/centroid';
export {default as combine} from '@turf/combine';
export {default as distance} from '@turf/distance';
export {default as explode} from '@turf/explode';
export {default as bbox} from '@turf/bbox';
export {default as tesselate} from '@turf/tesselate';
export {default as bboxPolygon} from '@turf/bbox-polygon';
export {default as booleanPointInPolygon} from '@turf/boolean-point-in-polygon';
export {default as nearestPoint} from '@turf/nearest-point';
export {default as nearestPointOnLine} from '@turf/nearest-point-on-line';
export {default as nearestPointToLine} from '@turf/nearest-point-to-line';
export {default as planepoint} from '@turf/planepoint';
export {default as tin} from '@turf/tin';
export {default as bearing} from '@turf/bearing';
export {default as destination} from '@turf/destination';
export {default as kinks} from '@turf/kinks';
export {default as pointOnFeature} from '@turf/point-on-feature';
export {default as area} from '@turf/area';
export {default as along} from '@turf/along';
export {default as length} from '@turf/length';
export {default as lineSlice} from '@turf/line-slice';
export {default as lineSliceAlong} from '@turf/line-slice-along';
export {default as pointGrid} from '@turf/point-grid';
export {default as truncate} from '@turf/truncate';
export {default as flatten} from '@turf/flatten';
export {default as lineIntersect} from '@turf/line-intersect';
export {default as lineChunk} from '@turf/line-chunk';
export {default as unkinkPolygon} from '@turf/unkink-polygon';
export {default as greatCircle} from '@turf/great-circle';
export {default as lineSegment} from '@turf/line-segment';
export {default as lineSplit} from '@turf/line-split';
export {default as lineArc} from '@turf/line-arc';
export {default as polygonToLine} from '@turf/polygon-to-line';
export {default as lineToPolygon} from '@turf/line-to-polygon';
export {default as bboxClip} from '@turf/bbox-clip';
export {default as lineOverlap} from '@turf/line-overlap';
export {default as sector} from '@turf/sector';
export {default as rhumbBearing} from '@turf/rhumb-bearing';
export {default as rhumbDistance} from '@turf/rhumb-distance';
export {default as rhumbDestination} from '@turf/rhumb-destination';
export {default as polygonTangents} from '@turf/polygon-tangents';
export {default as rewind} from '@turf/rewind';
export {default as isobands} from '@turf/isobands';
export {default as transformRotate} from '@turf/transform-rotate';
export {default as transformScale} from '@turf/transform-scale';
export {default as transformTranslate} from '@turf/transform-translate';
export {default as lineOffset} from '@turf/line-offset';
export {default as polygonize} from '@turf/polygonize';
export {default as booleanDisjoint} from '@turf/boolean-disjoint';
export {default as booleanContains} from '@turf/boolean-contains';
export {default as booleanCrosses} from '@turf/boolean-crosses';
export {default as booleanClockwise} from '@turf/boolean-clockwise';
export {default as booleanOverlap} from '@turf/boolean-overlap';
export {default as booleanPointOnLine} from '@turf/boolean-point-on-line';
export {default as booleanEqual} from '@turf/boolean-equal';
export {default as booleanWithin} from '@turf/boolean-within';
export {default as clone} from '@turf/clone';
export {default as cleanCoords} from '@turf/clean-coords';
export {default as clustersDbscan} from '@turf/clusters-dbscan';
export {default as clustersKmeans} from '@turf/clusters-kmeans';
export {default as pointToLineDistance} from '@turf/point-to-line-distance';
export {default as booleanParallel} from '@turf/boolean-parallel';
export {default as shortestPath} from '@turf/shortest-path';
export {default as voronoi} from '@turf/voronoi';
export {default as ellipse} from '@turf/ellipse';
export {default as centerMean} from '@turf/center-mean';
export {default as centerMedian} from '@turf/center-median';
export {default as standardDeviationalEllipse} from '@turf/standard-deviational-ellipse';
export * from '@turf/projection';
export * from '@turf/random';
export * from '@turf/clusters';
export * from '@turf/helpers';
export * from '@turf/invariant';
export * from '@turf/meta';
import * as projection from '@turf/projection';
import * as random from '@turf/random';
import * as clusters from '@turf/clusters';
import * as helpers from '@turf/helpers';
import * as invariant from '@turf/invariant';
import * as meta from '@turf/meta';
export {projection, random, clusters, helpers, invariant, meta};
// JSTS Modules
export {default as difference} from '@turf/difference';
export {default as buffer} from '@turf/buffer';
export {default as union} from '@turf/union';
export {default as intersect} from '@turf/intersect';
// JSTS Sub-Models
export {default as dissolve} from '@turf/dissolve';
export {default as hexGrid} from '@turf/hex-grid';
export {default as mask} from '@turf/mask';
export {default as squareGrid} from '@turf/square-grid';
export {default as triangleGrid} from '@turf/triangle-grid';
export {default as interpolate} from '@turf/interpolate';
// Renamed modules (Backwards compatible with v4.0)
// https://github.com/Turfjs/turf/issues/860
export {default as pointOnSurface} from '@turf/point-on-feature';
export {default as polygonToLineString} from '@turf/polygon-to-line';
export {default as lineStringToPolygon} from '@turf/line-to-polygon';
export {default as inside} from '@turf/boolean-point-in-polygon';
export {default as within} from '@turf/points-within-polygon';
export {default as bezier} from '@turf/bezier-spline';
export {default as nearest} from '@turf/nearest-point';
export {default as pointOnLine} from '@turf/nearest-point-on-line';
export {default as lineDistance} from '@turf/length';
// Renamed methods (Backwards compatible with v4.0)
// https://github.com/Turfjs/turf/issues/860
export {
    radiansToDegrees as radians2degrees,
    degreesToRadians as degrees2radians,
    lengthToDegrees as distanceToDegrees,
    lengthToRadians as distanceToRadians,
    radiansToLength as radiansToDistance,
    bearingToAzimuth as bearingToAngle,
    convertLength as convertDistance
} from '@turf/helpers';
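// Usage sketch (not part of the original index): measuring the distance
// between two points through the re-exports above. The relative import path
// './index' is an assumption about where this index file lives on disk.
import { point, distance } from './index';

const from = point([-75.343, 39.984]);
const to = point([-75.534, 39.123]);

// distance() accepts an options object; units defaults to 'kilometers'.
console.log(distance(from, to, { units: 'miles' }));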
// jQuery plugin: serialize a form into a plain object, collecting repeated
// field names into arrays.
$.fn.parseForm = function () {
    let serializeObj = {};
    let array = this.serializeArray();
    $(array).each(function () {
        if (serializeObj[this.name]) {
            if ($.isArray(serializeObj[this.name])) {
                serializeObj[this.name].push(this.value);
            } else {
                serializeObj[this.name] = [serializeObj[this.name], this.value];
            }
        } else {
            serializeObj[this.name] = this.value;
        }
    });
    return serializeObj;
};

module.exports = function (index = 0) {
    let frm = $(document.forms[index]);
    return {
        reset: function () {
            frm[0].reset();
        },
        submit: function (url, before, after) {
            let data = frm.parseForm();
            // before hook
            if (!!before) {
                let result = before(data);
                // abort the submission when the hook returns false
                if (!result) return;
            }
            $.post({
                url: url,
                data: data,
                parse: true,
                dataType: "json",
                success: function (req) {
                    // after hook
                    if (after) after(req);
                }
            });
        }
    };
};
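// Hypothetical usage sketch, assuming the module above is saved as form.js
// and the page contains at least one <form>. The endpoint '/api/posts' and
// the 'title' field are made up for illustration.
const createForm = require('./form');
const form = createForm(0);

form.submit(
    '/api/posts',
    function (data) {
        // before hook: returning false aborts the submission
        return !!data.title;
    },
    function (res) {
        // after hook: runs with the server's parsed JSON response
        console.log(res);
        form.reset();
    }
);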